-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml100
-rw-r--r--Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt19
-rw-r--r--Documentation/devicetree/bindings/display/bridge/sii902x.txt42
-rw-r--r--Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt6
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt1
-rw-r--r--Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt9
-rw-r--r--Documentation/devicetree/bindings/display/panel/edt,et-series.txt16
-rw-r--r--Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt12
-rw-r--r--Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt32
-rw-r--r--Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt42
-rw-r--r--Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt11
-rw-r--r--Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2587-53ts.txt14
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.txt33
-rw-r--r--Documentation/devicetree/bindings/display/panel/tfc,s9700rtwv43tr-01b.txt15
-rw-r--r--Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt12
-rw-r--r--Documentation/devicetree/bindings/display/renesas,du.txt2
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt7
-rw-r--r--Documentation/devicetree/bindings/display/st,stm32-ltdc.txt3
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt93
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt19
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml57
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml6
-rw-r--r--Documentation/gpu/amdgpu.rst24
-rw-r--r--Documentation/gpu/drivers.rst1
-rw-r--r--Documentation/gpu/drm-client.rst3
-rw-r--r--Documentation/gpu/drm-kms-helpers.rst15
-rw-r--r--Documentation/gpu/drm-mm.rst34
-rw-r--r--Documentation/gpu/drm-uapi.rst19
-rw-r--r--Documentation/gpu/mcde.rst8
-rw-r--r--Documentation/gpu/todo.rst49
-rw-r--r--MAINTAINERS9
-rw-r--r--drivers/dma-buf/dma-buf.c39
-rw-r--r--drivers/dma-buf/dma-fence.c21
-rw-r--r--drivers/dma-buf/reservation.c4
-rw-r--r--drivers/dma-buf/sync_debug.c26
-rw-r--r--drivers/dma-buf/sync_debug.h1
-rw-r--r--drivers/gpu/drm/Kconfig9
-rw-r--r--drivers/gpu/drm/Makefile10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c239
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c398
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c)133
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c187
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c259
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c250
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c310
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c141
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_crtc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_i2c.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c428
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c446
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_smc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c135
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c137
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h483
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm63
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c83
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c49
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c375
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c70
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c53
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c85
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c55
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c71
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h3
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig6
-rw-r--r--drivers/gpu/drm/amd/display/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c308
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile75
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c471
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c276
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c145
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c)184
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv2_clk_mgr.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv2_clk_mgr.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c252
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c167
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c135
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c96
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c432
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c184
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c129
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/audio.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h (renamed from drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h)152
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h14
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/set_mode_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c65
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h18
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h31
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h3
-rw-r--r--drivers/gpu/drm/amd/include/cik_structs.h3
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h1
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h11
-rw-r--r--drivers/gpu/drm/amd/include/v9_structs.h3
-rw-r--r--drivers/gpu/drm/amd/include/vi_structs.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c80
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c157
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c25
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c123
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c84
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/power_state.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c195
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c153
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.h44
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c28
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c3
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c8
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c3
-rw-r--r--drivers/gpu/drm/ast/Kconfig3
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c13
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h78
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c61
-rw-r--r--drivers/gpu/drm/ast/ast_main.c77
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c157
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c302
-rw-r--r--drivers/gpu/drm/ati_pcigart.c5
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c18
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c120
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c11
-rw-r--r--drivers/gpu/drm/bochs/Kconfig2
-rw-r--r--drivers/gpu/drm/bochs/bochs.h54
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c24
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c18
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c427
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c8
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.c9
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c58
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h6
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c24
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c2
-rw-r--r--drivers/gpu/drm/bridge/lvds-encoder.c10
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c3
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c3
-rw-r--r--drivers/gpu/drm/bridge/panel.c5
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c3
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c491
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c193
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c17
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c14
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c593
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c64
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c18
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c337
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c11
-rw-r--r--drivers/gpu/drm/drm_atomic.c248
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c232
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c52
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c23
-rw-r--r--drivers/gpu/drm/drm_auth.c30
-rw-r--r--drivers/gpu/drm/drm_blend.c9
-rw-r--r--drivers/gpu/drm/drm_bridge.c110
-rw-r--r--drivers/gpu/drm/drm_bufs.c21
-rw-r--r--drivers/gpu/drm/drm_client.c15
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c1087
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c8
-rw-r--r--drivers/gpu/drm/drm_connector.c96
-rw-r--r--drivers/gpu/drm/drm_context.c8
-rw-r--r--drivers/gpu/drm/drm_crtc.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c14
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h31
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c66
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c46
-rw-r--r--drivers/gpu/drm/drm_dma.c6
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c8
-rw-r--r--drivers/gpu/drm/drm_dp_dual_mode_helper.c4
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c12
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c19
-rw-r--r--drivers/gpu/drm/drm_drv.c14
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c139
-rw-r--r--drivers/gpu/drm/drm_edid_load.c9
-rw-r--r--drivers/gpu/drm/drm_encoder.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c1408
-rw-r--r--drivers/gpu/drm/drm_file.c133
-rw-r--r--drivers/gpu/drm/drm_flip_work.c6
-rw-r--r--drivers/gpu/drm/drm_format_helper.c4
-rw-r--r--drivers/gpu/drm/drm_fourcc.c120
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c13
-rw-r--r--drivers/gpu/drm/drm_gem.c8
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c11
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c7
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c641
-rw-r--r--drivers/gpu/drm/drm_hashtab.c10
-rw-r--r--drivers/gpu/drm/drm_hdcp.c382
-rw-r--r--drivers/gpu/drm/drm_internal.h37
-rw-r--r--drivers/gpu/drm/drm_ioc32.c9
-rw-r--r--drivers/gpu/drm/drm_ioctl.c22
-rw-r--r--drivers/gpu/drm/drm_irq.c13
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c3
-rw-r--r--drivers/gpu/drm/drm_lease.c15
-rw-r--r--drivers/gpu/drm/drm_legacy.h6
-rw-r--r--drivers/gpu/drm/drm_legacy_misc.c27
-rw-r--r--drivers/gpu/drm/drm_lock.c8
-rw-r--r--drivers/gpu/drm/drm_memory.c9
-rw-r--r--drivers/gpu/drm/drm_mm.c9
-rw-r--r--drivers/gpu/drm/drm_mode_config.c6
-rw-r--r--drivers/gpu/drm/drm_mode_object.c9
-rw-r--r--drivers/gpu/drm/drm_modes.c7
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_of.c5
-rw-r--r--drivers/gpu/drm/drm_pci.c11
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c9
-rw-r--r--drivers/gpu/drm/drm_prime.c84
-rw-r--r--drivers/gpu/drm/drm_print.c7
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c16
-rw-r--r--drivers/gpu/drm/drm_property.c7
-rw-r--r--drivers/gpu/drm/drm_rect.c4
-rw-r--r--drivers/gpu/drm/drm_scatter.c9
-rw-r--r--drivers/gpu/drm/drm_scdc_helper.c2
-rw-r--r--drivers/gpu/drm/drm_self_refresh_helper.c216
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c5
-rw-r--r--drivers/gpu/drm/drm_syncobj.c13
-rw-r--r--drivers/gpu/drm/drm_sysfs.c17
-rw-r--r--drivers/gpu/drm/drm_trace.h2
-rw-r--r--drivers/gpu/drm/drm_trace_points.c3
-rw-r--r--drivers/gpu/drm/drm_vblank.c12
-rw-r--r--drivers/gpu/drm/drm_vm.c14
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c6
-rw-r--r--drivers/gpu/drm/drm_vram_helper_common.c96
-rw-r--r--drivers/gpu/drm/drm_vram_mm_helper.c297
-rw-r--r--drivers/gpu/drm/drm_writeback.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c18
-rw-r--r--drivers/gpu/drm/gma500/blitter.h2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c13
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.h4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c8
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c10
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c9
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c9
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c9
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c26
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h1
-rw-r--r--drivers/gpu/drm/gma500/gem.c5
-rw-r--r--drivers/gpu/drm/gma500/gma_device.c1
-rw-r--r--drivers/gpu/drm/gma500/gma_device.h1
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c12
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h3
-rw-r--r--drivers/gpu/drm/gma500/gtt.c5
-rw-r--r--drivers/gpu/drm/gma500/gtt.h1
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c6
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h3
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c11
-rw-r--r--drivers/gpu/drm/gma500/intel_i2c.c4
-rw-r--r--drivers/gpu/drm/gma500/mdfld_device.c16
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c4
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c12
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.h8
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c4
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c11
-rw-r--r--drivers/gpu/drm/gma500/mdfld_tmd_vid.c2
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c5
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.h1
-rw-r--r--drivers/gpu/drm/gma500/mmu.c6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c8
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c20
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c8
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c11
-rw-r--r--drivers/gpu/drm/gma500/power.h4
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c12
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c33
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h16
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c15
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c9
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c6
-rw-r--r--drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c13
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c19
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c14
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h33
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c37
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c341
-rw-r--r--drivers/gpu/drm/i915/Kconfig6
-rw-r--r--drivers/gpu/drm/i915/Kconfig.profile13
-rw-r--r--drivers/gpu/drm/i915/Makefile52
-rw-r--r--drivers/gpu/drm/i915/Makefile.header-test34
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c3
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c3
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c3
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c5
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c3
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c3
-rw-r--r--drivers/gpu/drm/i915/gt/Makefile2
-rw-r--r--drivers/gpu/drm/i915/gt/Makefile.header-test16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c (renamed from drivers/gpu/drm/i915/intel_breadcrumbs.c)19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c179
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h130
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h (renamed from drivers/gpu/drm/i915/intel_context_types.h)19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h (renamed from drivers/gpu/drm/i915/intel_ringbuffer.h)54
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c (renamed from drivers/gpu/drm/i915/intel_engine_cs.c)381
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c164
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h (renamed from drivers/gpu/drm/i915/intel_engine_types.h)38
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h (renamed from drivers/gpu/drm/i915/intel_gpu_commands.h)0
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c143
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_hangcheck.c (renamed from drivers/gpu/drm/i915/intel_hangcheck.c)29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c (renamed from drivers/gpu/drm/i915/intel_lrc.c)1187
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h (renamed from drivers/gpu/drm/i915/intel_lrc.h)22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h (renamed from drivers/gpu/drm/i915/intel_lrc_reg.h)0
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c (renamed from drivers/gpu/drm/i915/intel_mocs.c)4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.h (renamed from drivers/gpu/drm/i915/intel_mocs.h)4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c (renamed from drivers/gpu/drm/i915/i915_reset.c)114
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h (renamed from drivers/gpu/drm/i915/i915_reset.h)3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ringbuffer.c (renamed from drivers/gpu/drm/i915/intel_ringbuffer.c)360
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c142
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h67
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c (renamed from drivers/gpu/drm/i915/intel_workarounds.c)305
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.h (renamed from drivers/gpu/drm/i915/intel_workarounds.h)10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds_types.h (renamed from drivers/gpu/drm/i915/intel_workarounds_types.h)7
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c (renamed from drivers/gpu/drm/i915/selftests/mock_engine.c)53
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.h (renamed from drivers/gpu/drm/i915/selftests/mock_engine.h)4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c (renamed from drivers/gpu/drm/i915/selftests/intel_engine_cs.c)0
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c (renamed from drivers/gpu/drm/i915/selftests/intel_hangcheck.c)227
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c (renamed from drivers/gpu/drm/i915/selftests/intel_lrc.c)538
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c118
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c (renamed from drivers/gpu/drm/i915/selftests/intel_workarounds.c)429
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c147
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c3
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c121
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.h20
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c45
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h411
-rw-r--r--drivers/gpu/drm/i915/i915_fixed.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c551
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c974
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h83
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context_types.h45
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c47
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c175
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c179
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h32
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_pm.c250
-rw-r--r--drivers/gpu/drm/i915/i915_gem_pm.h25
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c75
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h7
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c8
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h114
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c7
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c90
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c7
-rw-r--r--drivers/gpu/drm/i915/i915_query.c49
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h17
-rw-r--r--drivers/gpu/drm/i915/i915_request.c353
-rw-r--r--drivers/gpu/drm/i915/i915_request.h17
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c57
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h18
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h2
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c3
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c51
-rw-r--r--drivers/gpu/drm/i915/i915_timeline_types.h1
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h4
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h197
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c13
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h3
-rw-r--r--drivers/gpu/drm/i915/icl_dsi.c63
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.h17
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c21
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.h49
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c61
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h21
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c59
-rw-r--r--drivers/gpu/drm/i915/intel_color.c96
-rw-r--r--drivers/gpu/drm/i915/intel_combo_phy.c77
-rw-r--r--drivers/gpu/drm/i915/intel_combo_phy.h20
-rw-r--r--drivers/gpu/drm/i915/intel_context.h87
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c3
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_csr.h4
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c40
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h33
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1026
-rw-r--r--drivers/gpu/drm/i915/intel_display.h10
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c263
-rw-r--r--drivers/gpu/drm/i915/intel_dp.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dp_aux_backlight.c1
-rw-r--r--drivers/gpu/drm/i915/intel_dp_aux_backlight.h13
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c1
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.h14
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c10
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.h14
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c39
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.h58
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c67
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.h8
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h412
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h7
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c8
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_dcs_backlight.h13
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_vbt.c21
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c8
-rw-r--r--drivers/gpu/drm/i915/intel_dvo_dev.h (renamed from drivers/gpu/drm/i915/dvo.h)10
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c1
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.h27
-rw-r--r--drivers/gpu/drm/i915/intel_gmbus.c (renamed from drivers/gpu/drm/i915/intel_i2c.c)33
-rw-r--r--drivers/gpu/drm/i915/intel_gmbus.h27
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c40
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h12
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.h5
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c20
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c32
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.h3
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.c55
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.h1
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c30
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c1
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.h30
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c74
-rw-r--r--drivers/gpu/drm/i915/intel_huc.h6
-rw-r--r--drivers/gpu/drm/i915/intel_huc_fw.c49
-rw-r--r--drivers/gpu/drm/i915/intel_lpe_audio.c8
-rw-r--r--drivers/gpu/drm/i915/intel_lpe_audio.h22
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c9
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.h29
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.h3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c393
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h10
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c12
-rw-r--r--drivers/gpu/drm/i915/intel_quirks.c1
-rw-r--r--drivers/gpu/drm/i915/intel_quirks.h13
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c885
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h142
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c13
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c480
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.h140
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c37
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.h12
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c9
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c90
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h3
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.c103
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.h10
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c8
-rw-r--r--drivers/gpu/drm/i915/intel_vdsc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_vdsc.h21
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c75
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h133
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c21
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_coherency.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c223
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c31
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c13
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_timeline.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c246
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_atomic.h56
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c32
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_gem_utils.c34
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_gem_utils.h17
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.h3
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c15
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c37
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c6
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi.c34
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi_pll.c18
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c15
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c2
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c8
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c13
-rw-r--r--drivers/gpu/drm/mcde/Kconfig18
-rw-r--r--drivers/gpu/drm/mcde/Makefile3
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c1142
-rw-r--r--drivers/gpu/drm/mcde/mcde_display_regs.h518
-rw-r--r--drivers/gpu/drm/mcde/mcde_drm.h44
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c572
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c1044
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi_regs.h385
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c3
-rw-r--r--drivers/gpu/drm/meson/Kconfig1
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c17
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c19
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c183
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c13
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h75
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c59
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c91
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c59
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c301
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c31
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c24
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c7
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c180
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c16
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c25
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c25
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.h1
-rw-r--r--drivers/gpu/drm/panel/Kconfig18
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c6
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c9
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c10
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c8
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c9
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c9
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c7
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c8
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c254
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c7
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c3
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c5
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c14
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c514
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c12
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c7
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c7
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c276
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c6
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c10
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c13
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c22
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c28
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c9
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h16
-rw-r--r--drivers/gpu/drm/r128/r128_state.c25
-rw-r--r--drivers/gpu/drm/radeon/atom.c2
-rw-r--r--drivers/gpu/drm/radeon/atom.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c7
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c16
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.h3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c18
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c6
-rw-r--r--drivers/gpu/drm/radeon/clearstate_cayman.h2
-rw-r--r--drivers/gpu/drm/radeon/clearstate_ci.h2
-rw-r--r--drivers/gpu/drm/radeon/clearstate_si.h2
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c11
-rw-r--r--drivers/gpu/drm/radeon/dce3_1_afmt.c2
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c16
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/kv_smc.c1
-rw-r--r--drivers/gpu/drm/radeon/ni.c17
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c16
-rw-r--r--drivers/gpu/drm/radeon/r100.c36
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h2
-rw-r--r--drivers/gpu/drm/radeon/r200.c2
-rw-r--r--drivers/gpu/drm/radeon/r300.c18
-rw-r--r--drivers/gpu/drm/radeon/r420.c16
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c18
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c1
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace_points.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c2
-rw-r--r--drivers/gpu/drm/radeon/rs400.c11
-rw-r--r--drivers/gpu/drm/radeon/rs600.c13
-rw-r--r--drivers/gpu/drm/radeon/rs690.c6
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c12
-rw-r--r--drivers/gpu/drm/radeon/rv515.c13
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c1
-rw-r--r--drivers/gpu/drm/radeon/rv730_dpm.c1
-rw-r--r--drivers/gpu/drm/radeon/rv740_dpm.c1
-rw-r--r--drivers/gpu/drm/radeon/rv770.c12
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c1
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c16
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c2
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c1
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/sumo_smc.c1
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/trinity_smc.c1
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c4
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c2
-rw-r--r--drivers/gpu/drm/radeon/uvd_v3_1.c1
-rw-r--r--drivers/gpu/drm/radeon/uvd_v4_2.c2
-rw-r--r--drivers/gpu/drm/radeon/vce_v1_0.c2
-rw-r--r--drivers/gpu/drm/radeon/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c30
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c12
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c82
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c139
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.h5
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c30
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c39
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c25
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c9
-rw-r--r--drivers/gpu/drm/savage/savage_drv.h10
-rw-r--r--drivers/gpu/drm/savage/savage_state.c9
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c179
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c8
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h10
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c7
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.c2
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.h2
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c5
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c4
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h6
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.h3
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c21
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h5
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c3
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c4
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.h5
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c6
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c5
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h4
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c8
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c4
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h7
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c4
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h1
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c6
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c4
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c4
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.h1
-rw-r--r--drivers/gpu/drm/stm/drv.c43
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c105
-rw-r--r--drivers/gpu/drm/stm/ltdc.c142
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c16
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c1
-rw-r--r--drivers/gpu/drm/tegra/dc.c17
-rw-r--r--drivers/gpu/drm/tegra/fb.c14
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c270
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c20
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c35
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c17
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h106
-rw-r--r--drivers/gpu/drm/v3d/v3d_fence.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c552
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c55
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c7
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h122
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c382
-rw-r--r--drivers/gpu/drm/v3d/v3d_trace.h94
-rw-r--r--drivers/gpu/drm/vboxvideo/Kconfig2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c12
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h75
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_fb.c22
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c75
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c36
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c355
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c31
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h14
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c20
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c72
-rw-r--r--drivers/gpu/drm/virtio/Makefile4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c20
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h10
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c150
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c25
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c38
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_trace.h52
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_trace_points.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c36
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c55
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h2
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c6
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c34
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c2
-rw-r--r--drivers/video/hdmi.c275
-rw-r--r--include/drm/bridge/dw_hdmi.h2
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h10
-rw-r--r--include/drm/drm_atomic.h22
-rw-r--r--include/drm/drm_atomic_helper.h4
-rw-r--r--include/drm/drm_atomic_state_helper.h2
-rw-r--r--include/drm/drm_auth.h11
-rw-r--r--include/drm/drm_bridge.h114
-rw-r--r--include/drm/drm_client.h46
-rw-r--r--include/drm/drm_connector.h40
-rw-r--r--include/drm/drm_crtc.h20
-rw-r--r--include/drm/drm_debugfs.h2
-rw-r--r--include/drm/drm_device.h4
-rw-r--r--include/drm/drm_dp_helper.h33
-rw-r--r--include/drm/drm_edid.h37
-rw-r--r--include/drm/drm_fb_helper.h102
-rw-r--r--include/drm/drm_fourcc.h50
-rw-r--r--include/drm/drm_framebuffer.h3
-rw-r--r--include/drm/drm_gem_vram_helper.h153
-rw-r--r--include/drm/drm_hdcp.h31
-rw-r--r--include/drm/drm_legacy.h12
-rw-r--r--include/drm/drm_mode_config.h13
-rw-r--r--include/drm/drm_modeset_helper_vtables.h61
-rw-r--r--include/drm/drm_plane.h2
-rw-r--r--include/drm/drm_print.h2
-rw-r--r--include/drm/drm_self_refresh_helper.h20
-rw-r--r--include/drm/drm_vram_mm_helper.h102
-rw-r--r--include/drm/gpu_scheduler.h8
-rw-r--r--include/drm/i915_pciids.h4
-rw-r--r--include/drm/ttm/ttm_bo_driver.h9
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h3
-rw-r--r--include/linux/dma-buf.h47
-rw-r--r--include/linux/hdmi.h67
-rw-r--r--include/linux/reservation.h8
-rw-r--r--include/uapi/drm/drm.h1
-rw-r--r--include/uapi/drm/drm_mode.h117
-rw-r--r--include/uapi/drm/i915_drm.h209
-rw-r--r--include/uapi/drm/v3d_drm.h28
-rw-r--r--include/uapi/linux/kfd_ioctl.h35
1087 files changed, 36652 insertions, 15830 deletions
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
new file mode 100644
index 000000000000..47950fced28d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/allwinner,sun6i-a31-mipi-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A31 MIPI-DSI Controller Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ "#address-cells": true
+ "#size-cells": true
+
+ compatible:
+ const: allwinner,sun6i-a31-mipi-dsi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Module Clock
+
+ clock-names:
+ items:
+ - const: bus
+ - const: mod
+
+ resets:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: dphy
+
+ port:
+ type: object
+ description:
+ A port node with endpoint definitions as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt. That
+ port should be the input endpoint, usually coming from the
+ associated TCON.
+
+patternProperties:
+ "^panel@[0-9]+$": true
+
+required:
+ - "#address-cells"
+ - "#size-cells"
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - phys
+ - phy-names
+ - resets
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ dsi0: dsi@1ca0000 {
+ compatible = "allwinner,sun6i-a31-mipi-dsi";
+ reg = <0x01ca0000 0x1000>;
+ interrupts = <0 89 4>;
+ clocks = <&ccu 23>, <&ccu 96>;
+ clock-names = "bus", "mod";
+ resets = <&ccu 4>;
+ phys = <&dphy0>;
+ phy-names = "dphy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
+ reg = <0>;
+ power-gpios = <&pio 1 7 0>; /* PB07 */
+ reset-gpios = <&r_pio 0 5 1>; /* PL05 */
+ backlight = <&pwm_bl>;
+ };
+
+ port {
+ dsi0_in_tcon0: endpoint {
+ remote-endpoint = <&tcon0_out_dsi0>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
index 900a884ad9f5..c6a196d0b075 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
@@ -9,6 +9,7 @@ Required properties:
- compatible : Shall contain one of
- "renesas,r8a7743-lvds" for R8A7743 (RZ/G1M) compatible LVDS encoders
- "renesas,r8a7744-lvds" for R8A7744 (RZ/G1N) compatible LVDS encoders
+ - "renesas,r8a774a1-lvds" for R8A774A1 (RZ/G2M) compatible LVDS encoders
- "renesas,r8a774c0-lvds" for R8A774C0 (RZ/G2E) compatible LVDS encoders
- "renesas,r8a7790-lvds" for R8A7790 (R-Car H2) compatible LVDS encoders
- "renesas,r8a7791-lvds" for R8A7791 (R-Car M2-W) compatible LVDS encoders
@@ -45,14 +46,24 @@ OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
Each port shall have a single endpoint.
+Optional properties:
+
+- renesas,companion : phandle to the companion LVDS encoder. This property is
+ mandatory for the first LVDS encoder on D3 and E3 SoCs, and shall point to
+ the second encoder to be used as a companion in dual-link mode. It shall not
+ be set for any other LVDS encoder.
+
Example:
lvds0: lvds@feb90000 {
- compatible = "renesas,r8a7790-lvds";
- reg = <0 0xfeb90000 0 0x1c>;
- clocks = <&cpg CPG_MOD 726>;
- resets = <&cpg 726>;
+ compatible = "renesas,r8a77990-lvds";
+ reg = <0 0xfeb90000 0 0x20>;
+ clocks = <&cpg CPG_MOD 727>;
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 727>;
+
+ renesas,companion = <&lvds1>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/bridge/sii902x.txt b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
index 72d2dc6c3e6b..2df44b7d3821 100644
--- a/Documentation/devicetree/bindings/display/bridge/sii902x.txt
+++ b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
@@ -5,10 +5,44 @@ Required properties:
- reg: i2c address of the bridge
Optional properties:
- - interrupts: describe the interrupt line used to inform the host
+ - interrupts: describe the interrupt line used to inform the host
about hotplug events.
- reset-gpios: OF device-tree gpio specification for RST_N pin.
+ HDMI audio properties:
+ - #sound-dai-cells: <0> or <1>. <0> if only the i2s or the spdif pin
+ is wired, <1> if both are wired. HDMI audio is
+ configured only if this property is found.
+ - sil,i2s-data-lanes: Array of up to 4 integers with values of 0-3.
+ Each integer indicates which i2s pin is connected to which
+ audio fifo. The first integer selects the i2s audio pin for the
+ first audio fifo#0 (HDMI channels 1&2), the second for fifo#1
+ (HDMI channels 3&4), and so on. There are 4 fifos and 4 i2s
+ pins (SD0 - SD3). Any i2s pin can be connected to any fifo,
+ but there can be no gaps. E.g. an i2s pin must be mapped to
+ fifo#0 and fifo#1 before mapping a channel to fifo#2. The
+ default value is <0>, describing the SD0 pin being routed to
+ hdmi audio fifo #0.
+ - clocks: phandle and clock specifier for each clock listed in
+ the clock-names property
+ - clock-names: "mclk"
+ Describes SII902x MCLK input. MCLK is used to produce
+ HDMI audio CTS values. This property is required if the
+ "#sound-dai-cells" property is present. It follows the
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+ consumer binding.
+
+ If HDMI audio is configured, the sii902x device becomes an I2S
+ and/or spdif audio codec component (e.g. a digital audio sink)
+ that can be used to configure a full audio device with the
+ simple-card or audio-graph-card binding. See their binding
+ documents on how to describe the way the sii902x device is
+ connected to the rest of the audio system:
+ Documentation/devicetree/bindings/sound/simple-card.txt
+ Documentation/devicetree/bindings/sound/audio-graph-card.txt
+ Note: In case of the audio-graph-card binding the used port
+ index should be 3.
+
Optional subnodes:
- video input: this subnode can contain a video input port node
to connect the bridge to a display controller output (See this
@@ -21,6 +55,12 @@ Example:
compatible = "sil,sii9022";
reg = <0x39>;
reset-gpios = <&pioA 1 0>;
+
+ #sound-dai-cells = <0>;
+ sil,i2s-data-lanes = < 0 1 2 >;
+ clocks = <&mclk>;
+ clock-names = "mclk";
+
ports {
#address-cells = <1>;
#size-cells = <0>;
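A minimal sketch of how a board could then tie the bridge into a sound card
with the simple-card binding; the CPU DAI label (&ssc0) and the bridge label
(&hdmi_bridge) are placeholders, not part of this patch:

	sound {
		compatible = "simple-audio-card";
		simple-audio-card,format = "i2s";

		simple-audio-card,cpu {
			/* platform I2S controller, placeholder label */
			sound-dai = <&ssc0>;
		};

		simple-audio-card,codec {
			/* the sii902x node above (#sound-dai-cells = <0>) */
			sound-dai = <&hdmi_bridge>;
		};
	};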
diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt
index 37f0c04d5a28..d17d1e5820d7 100644
--- a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt
+++ b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvd1024.txt
@@ -28,6 +28,12 @@ Optional video port nodes:
- port@1: Second LVDS input port
- port@3: Second digital CMOS/TTL parallel output
+The device can operate in single-link mode or dual-link mode. In single-link
+mode, all pixels are received on port@0, and port@1 shall not contain any
+endpoint. In dual-link mode, even-numbered pixels are received on port@0 and
+odd-numbered pixels on port@1, and both port@0 and port@1 shall contain
+endpoints.
+
Example:
--------
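A sketch of the dual-link case described above (the remote-endpoint labels
are illustrative only, not taken from a real board):

	lvds-decoder {
		compatible = "thine,thc63lvd1024";
		vcc-supply = <&reg_lvds_vcc>;

		ports {
			#address-cells = <1>;
			#size-cells = <0>;

			port@0 {
				reg = <0>;
				lvds_dec_in_0: endpoint {
					/* even-numbered pixels */
					remote-endpoint = <&dual_lvds_even>;
				};
			};

			port@1 {
				reg = <1>;
				lvds_dec_in_1: endpoint {
					/* odd-numbered pixels */
					remote-endpoint = <&dual_lvds_odd>;
				};
			};

			port@2 {
				reg = <2>;
				lvds_dec_out: endpoint {
					remote-endpoint = <&panel_in>;
				};
			};
		};
	};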
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt
index e3f6aa6a214d..583c5e9dbe6b 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt
@@ -12,6 +12,7 @@ Optional properties:
(active high shutdown input)
- reset-gpios: OF device-tree gpio specification for RSTX pin
(active low system reset)
+ - toshiba,hpd-pin: TC358767 GPIO pin number to which HPD is connected (0 or 1)
- ports: the ports node can contain video interface port nodes to connect
to a DPI/DSI source and to an eDP/DP sink according to [1][2]:
- port@0: DSI input port
diff --git a/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
new file mode 100644
index 000000000000..a30d63db3c8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
@@ -0,0 +1,9 @@
+Armadeus ST0700 Adapt. A Santek ST0700I5Y-RBSLW 7.0" WVGA (800x480) TFT with
+an adapter board.
+
+Required properties:
+- compatible: "armadeus,st0700-adapt"
+- power-supply: see panel-common.txt
+
+Optional properties:
+- backlight: see panel-common.txt
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
index f56b99ebd9be..be8684327ee4 100644
--- a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
+++ b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
@@ -6,6 +6,22 @@ Display bindings for EDT Display Technology Corp. Displays which are
compatible with the simple-panel binding, which is specified in
simple-panel.txt
+3,5" QVGA TFT Panels
+--------------------
++-----------------+---------------------+-------------------------------------+
+| Identifier | compatbile | description |
++=================+=====================+=====================================+
+| ET035012DM6 | edt,et035012dm6 | 3.5" QVGA TFT LCD panel |
++-----------------+---------------------+-------------------------------------+
+
+4,3" WVGA TFT Panels
+--------------------
+
++-----------------+---------------------+-------------------------------------+
+| Identifier | compatbile | description |
++=================+=====================+=====================================+
+| ETM0430G0DH6 | edt,etm0430g0dh6 | 480x272 TFT Display |
++-----------------+---------------------+-------------------------------------+
5,7" WVGA TFT Panels
--------------------
diff --git a/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt b/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt
new file mode 100644
index 000000000000..82d22e191ac3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt
@@ -0,0 +1,12 @@
+Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
+
+Required properties:
+- compatible: should be "evervision,vgg804821"
+- power-supply: See simple-panel.txt
+
+Optional properties:
+- backlight: See simple-panel.txt
+- enable-gpios: See simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt b/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt
new file mode 100644
index 000000000000..6c9156fc3478
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt
@@ -0,0 +1,32 @@
+FriendlyELEC HD702E 800x1280 LCD panel
+
+The HD702E is an eDP LCD panel developed by FriendlyELEC, with an 800x1280
+resolution. It has a built-in Goodix GT9271 capacitive touchscreen
+with backlight adjustable via PWM.
+
+Required properties:
+- compatible: should be "friendlyarm,hd702e"
+- power-supply: regulator to provide the supply voltage
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Optional nodes:
+- Video port for LCD panel input.
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Example:
+
+ panel {
+ compatible ="friendlyarm,hd702e", "simple-panel";
+ backlight = <&backlight>;
+ power-supply = <&vcc3v3_sys>;
+
+ port {
+ panel_in_edp: endpoint {
+ remote-endpoint = <&edp_out_panel>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt b/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt
new file mode 100644
index 000000000000..be7ac666807b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt
@@ -0,0 +1,42 @@
+Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
+
+Required properties:
+- compatible: should be "koe,tx14d24vm1bpa"
+- backlight: phandle of the backlight device attached to the panel
+- power-supply: single regulator to provide the supply voltage
+
+Required nodes:
+- port: Parallel port mapping to connect this display
+
+This panel needs a single power supply voltage. Its backlight is controlled
+via a PWM signal.
+
+Example:
+--------
+
+Example device-tree definition when connected to an i.MX53 based board:
+
+ lcd_panel: lcd-panel {
+ compatible = "koe,tx14d24vm1bpa";
+ backlight = <&backlight_lcd>;
+ power-supply = <&reg_3v3>;
+
+ port {
+ lcd_panel_in: endpoint {
+ remote-endpoint = <&lcd_display_out>;
+ };
+ };
+ };
+
+Then one needs to extend the dispX node:
+
+ lcd_display: disp1 {
+
+ port@1 {
+ reg = <1>;
+
+ lcd_display_out: endpoint {
+ remote-endpoint = <&lcd_panel_in>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt
new file mode 100644
index 000000000000..85c0b2cacfda
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt
@@ -0,0 +1,11 @@
+One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
+
+Required properties:
+- compatible: should be "osddisplays,osd101t2045-53ts"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2587-53ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2587-53ts.txt
new file mode 100644
index 000000000000..9d88e96003fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2587-53ts.txt
@@ -0,0 +1,14 @@
+One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel
+
+The panel is similar to OSD101T2045-53TS, but it needs an additional
+MIPI_DSI_TURN_ON_PERIPHERAL message from the host.
+
+Required properties:
+- compatible: should be "osddisplays,osd101t2587-53ts"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.txt
new file mode 100644
index 000000000000..9fb9ebeef8e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.txt
@@ -0,0 +1,33 @@
+Samsung s6e63m0 AMOLED LCD panel
+
+Required properties:
+ - compatible: "samsung,s6e63m0"
+ - reset-gpios: GPIO spec for reset pin
+ - vdd3-supply: VDD regulator
+ - vci-supply: VCI regulator
+
+The panel must obey the rules for SPI slave devices specified in [1].
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in [2]. This
+node should describe the panel's video bus.
+
+[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
+[2]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+
+ s6e63m0: display@0 {
+ compatible = "samsung,s6e63m0";
+ reg = <0>;
+ reset-gpios = <&mp05 5 1>;
+ vdd3-supply = <&ldo12_reg>;
+ vci-supply = <&ldo11_reg>;
+ spi-max-frequency = <1200000>;
+
+ port {
+ lcd_ep: endpoint {
+ remote-endpoint = <&fimd_ep>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/tfc,s9700rtwv43tr-01b.txt b/Documentation/devicetree/bindings/display/panel/tfc,s9700rtwv43tr-01b.txt
new file mode 100644
index 000000000000..dfb572f085eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/tfc,s9700rtwv43tr-01b.txt
@@ -0,0 +1,15 @@
+TFC (Three Five Corp) S9700RTWV43TR-01B 7" 800x480 LCD panel with
+resistive touch
+
+The panel is found on the TI AM335x EVM.
+
+Required properties:
+- compatible: should be "tfc,s9700rtwv43tr-01b"
+- power-supply: See panel-common.txt
+
+Optional properties:
+- enable-gpios: GPIO pin to enable or disable the panel, if there is one
+- backlight: phandle of the backlight device attached to the panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt b/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt
new file mode 100644
index 000000000000..b42bf06bbd99
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt
@@ -0,0 +1,12 @@
+VXT 800x480 color TFT LCD panel
+
+Required properties:
+- compatible: should be "vxt,vl050-8048nt-c01"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index aedb22b4d161..c97dfacad281 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -7,6 +7,7 @@ Required Properties:
- "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
- "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
- "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
+ - "renesas,du-r8a774a1" for R8A774A1 (RZ/G2M) compatible DU
- "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
@@ -58,6 +59,7 @@ corresponding to each DU output.
R8A7744 (RZ/G1N) DPAD 0 LVDS 0 - -
R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - -
R8A77470 (RZ/G1C) DPAD 0 DPAD 1 LVDS 0 -
+ R8A774A1 (RZ/G2M) DPAD 0 HDMI 0 LVDS 0 -
R8A774C0 (RZ/G2E) DPAD 0 LVDS 0 LVDS 1 -
R8A7779 (R-Car H1) DPAD 0 DPAD 1 - -
R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 -
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
index 39143424a474..8346bac81f1c 100644
--- a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
@@ -38,6 +38,13 @@ Optional properties
- phys: from general PHY binding: the phandle for the PHY device.
- phy-names: Should be "hdmi" if phys references an external phy.
+Optional pinctrl entry:
+- If you have both an "unwedge" and a "default" pinctrl entry, dw_hdmi
+  will switch to the unwedge pinctrl state for 10ms if it ever gets an
+  i2c timeout. It's intended that this unwedge pinctrl entry will
+  cause the SDA line to be driven low to work around a hardware
+  erratum.
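A sketch of what such pinctrl entries could look like on the HDMI node (the
pinctrl node labels here are illustrative only):

	&hdmi {
		pinctrl-names = "default", "unwedge";
		pinctrl-0 = <&hdmi_i2c_xfer>;
		pinctrl-1 = <&hdmi_unwedge>;
	};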
+
Example:
hdmi: hdmi@ff980000 {
diff --git a/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt b/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
index 3eb1b48b47dd..60c54da4e526 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
+++ b/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
@@ -40,6 +40,8 @@ Mandatory nodes specific to STM32 DSI:
- panel or bridge node: A node containing the panel or bridge description as
documented in [6].
- port: panel or bridge port node, connected to the DSI output port (port@1).
+Optional properties:
+- phy-dsi-supply: phandle of the regulator that provides the supply voltage.
Note: You can find more documentation in the following references
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -101,6 +103,7 @@ Example 2: DSI panel
clock-names = "pclk", "ref";
resets = <&rcc STM32F4_APB2_RESET(DSI)>;
reset-names = "apb";
+ phy-dsi-supply = <&reg18>;
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt b/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt
deleted file mode 100644
index 6a6cf5de08b0..000000000000
--- a/Documentation/devicetree/bindings/display/sunxi/sun6i-dsi.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-Allwinner A31 DSI Encoder
-=========================
-
-The DSI pipeline consists of two separate blocks: the DSI controller
-itself, and its associated D-PHY.
-
-DSI Encoder
------------
-
-The DSI Encoder generates the DSI signal from the TCON's.
-
-Required properties:
- - compatible: value must be one of:
- * allwinner,sun6i-a31-mipi-dsi
- - reg: base address and size of memory-mapped region
- - interrupts: interrupt associated to this IP
- - clocks: phandles to the clocks feeding the DSI encoder
- * bus: the DSI interface clock
- * mod: the DSI module clock
- - clock-names: the clock names mentioned above
- - phys: phandle to the D-PHY
- - phy-names: must be "dphy"
- - resets: phandle to the reset controller driving the encoder
-
- - ports: A ports node with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt. The
- first port should be the input endpoint, usually coming from the
- associated TCON.
-
-Any MIPI-DSI device attached to this should be described according to
-the bindings defined in ../mipi-dsi-bus.txt
-
-D-PHY
------
-
-Required properties:
- - compatible: value must be one of:
- * allwinner,sun6i-a31-mipi-dphy
- - reg: base address and size of memory-mapped region
- - clocks: phandles to the clocks feeding the DSI encoder
- * bus: the DSI interface clock
- * mod: the DSI module clock
- - clock-names: the clock names mentioned above
- - resets: phandle to the reset controller driving the encoder
-
-Example:
-
-dsi0: dsi@1ca0000 {
- compatible = "allwinner,sun6i-a31-mipi-dsi";
- reg = <0x01ca0000 0x1000>;
- interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_MIPI_DSI>,
- <&ccu CLK_DSI_SCLK>;
- clock-names = "bus", "mod";
- resets = <&ccu RST_BUS_MIPI_DSI>;
- phys = <&dphy0>;
- phy-names = "dphy";
- #address-cells = <1>;
- #size-cells = <0>;
-
- panel@0 {
- compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
- reg = <0>;
- power-gpios = <&pio 1 7 GPIO_ACTIVE_HIGH>; /* PB07 */
- reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
- backlight = <&pwm_bl>;
- };
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- dsi0_in_tcon0: endpoint {
- remote-endpoint = <&tcon0_out_dsi0>;
- };
- };
- };
-};
-
-dphy0: d-phy@1ca1000 {
- compatible = "allwinner,sun6i-a31-mipi-dphy";
- reg = <0x01ca1000 0x1000>;
- clocks = <&ccu CLK_BUS_MIPI_DSI>,
- <&ccu CLK_DSI_DPHY>;
- clock-names = "bus", "mod";
- resets = <&ccu RST_BUS_MIPI_DSI>;
- #phy-cells = <0>;
-};
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
index 1b1a74129141..e5ad3b2afe17 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
@@ -15,6 +15,7 @@ Required properties:
+ "arm,mali-t860"
+ "arm,mali-t880"
* which must be preceded by one of the following vendor specifics:
+ + "allwinner,sun50i-h6-mali"
+ "amlogic,meson-gxm-mali"
+ "rockchip,rk3288-mali"
+ "rockchip,rk3399-mali"
@@ -31,21 +32,36 @@ Optional properties:
- clocks : Phandle to clock for the Mali Midgard device.
+- clock-names : Specify the names of the clocks listed in the clocks
+  property when multiple clocks are present.
+ * core: clock driving the GPU itself (When only one clock is present,
+ assume it's this clock.)
+ * bus: bus clock for the GPU
+
- mali-supply : Phandle to regulator for the Mali device. Refer to
Documentation/devicetree/bindings/regulator/regulator.txt for details.
- operating-points-v2 : Refer to Documentation/devicetree/bindings/opp/opp.txt
for details.
+- #cooling-cells: Refer to Documentation/devicetree/bindings/thermal/thermal.txt
+ for details.
+
- resets : Phandle of the GPU reset line.
Vendor-specific bindings
------------------------
The Mali GPU is integrated very differently from one SoC to
-another. In order to accomodate those differences, you have the option
+another. In order to accommodate those differences, you have the option
to specify one more vendor-specific compatible, among:
+- "allwinner,sun50i-h6-mali"
+ Required properties:
+ - clocks : phandles to core and bus clocks
+ - clock-names : must contain "core" and "bus"
+ - resets: phandle to GPU reset line
+
- "amlogic,meson-gxm-mali"
Required properties:
- resets : Should contain phandles of :
@@ -65,6 +81,7 @@ gpu@ffa30000 {
mali-supply = <&vdd_gpu>;
operating-points-v2 = <&gpu_opp_table>;
power-domains = <&power RK3288_PD_GPU>;
+ #cooling-cells = <2>;
};
gpu_opp_table: opp_table0 {
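For the new Allwinner entry above, a hedged sketch of a matching GPU node
(register address, interrupt numbers and clock indices are placeholders):

	gpu: gpu@1800000 {
		compatible = "allwinner,sun50i-h6-mali", "arm,mali-t720";
		reg = <0x01800000 0x4000>;
		interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-names = "job", "mmu", "gpu";
		clocks = <&ccu CLK_GPU>, <&ccu CLK_BUS_GPU>;
		clock-names = "core", "bus";
		resets = <&ccu RST_BUS_GPU>;
	};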
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
new file mode 100644
index 000000000000..250f9d5aabdf
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/allwinner,sun6i-a31-mipi-dphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A31 MIPI D-PHY Controller Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ "#phy-cells":
+ const: 0
+
+ compatible:
+ const: allwinner,sun6i-a31-mipi-dphy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Module Clock
+
+ clock-names:
+ items:
+ - const: bus
+ - const: mod
+
+ resets:
+ maxItems: 1
+
+required:
+ - "#phy-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ dphy0: d-phy@1ca1000 {
+ compatible = "allwinner,sun6i-a31-mipi-dphy";
+ reg = <0x01ca1000 0x1000>;
+ clocks = <&ccu 23>, <&ccu 97>;
+ clock-names = "bus", "mod";
+ resets = <&ccu 4>;
+ #phy-cells = <0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 33a65a45e319..f0bcff033ecc 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -287,6 +287,8 @@ patternProperties:
description: Everest Semiconductor Co. Ltd.
"^everspin,.*":
description: Everspin Technologies, Inc.
+ "^evervision,.*":
+ description: Evervision Electronics Co. Ltd.
"^exar,.*":
description: Exar Corporation
"^excito,.*":
@@ -849,6 +851,8 @@ patternProperties:
description: Shenzhen Techstar Electronics Co., Ltd.
"^terasic,.*":
description: Terasic Inc.
+ "^tfc,.*":
+ description: Three Five Corp
"^thine,.*":
description: THine Electronics, Inc.
"^ti,.*":
@@ -923,6 +927,8 @@ patternProperties:
description: Voipac Technologies s.r.o.
"^vot,.*":
description: Vision Optical Technology Co., Ltd.
+ "^vxt,.*":
+ description: VXT Ltd
"^wd,.*":
description: Western Digital Corp.
"^wetek,.*":
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
index a740e491dfcc..5acdd1842ea2 100644
--- a/Documentation/gpu/amdgpu.rst
+++ b/Documentation/gpu/amdgpu.rst
@@ -37,10 +37,10 @@ Buffer Objects
PRIME Buffer Sharing
--------------------
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
:doc: PRIME Buffer Sharing
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
:internal:
MMU Notifier
@@ -70,6 +70,26 @@ Interrupt Handling
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
:internal:
+AMDGPU XGMI Support
+===================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+ :doc: AMDGPU XGMI Support
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+ :internal:
+
+AMDGPU RAS debugfs control interface
+====================================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS debugfs control interface
+
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :internal:
+
+
GPU Power/Thermal Controls and Monitoring
=========================================
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index 044a7025477c..4bfb7068e9f7 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -7,6 +7,7 @@ GPU Driver Documentation
amdgpu
amdgpu-dc
i915
+ mcde
meson
pl111
tegra
diff --git a/Documentation/gpu/drm-client.rst b/Documentation/gpu/drm-client.rst
index 7e672063e7eb..58b5a1d1219d 100644
--- a/Documentation/gpu/drm-client.rst
+++ b/Documentation/gpu/drm-client.rst
@@ -10,3 +10,6 @@ Kernel clients
.. kernel-doc:: drivers/gpu/drm/drm_client.c
:export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_client_modeset.c
+ :export:
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 14102ae035dc..b327bbc11182 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -181,6 +181,21 @@ Panel Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_panel_orientation_quirks.c
:export:
+Panel Self Refresh Helper Reference
+===================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_self_refresh_helper.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_self_refresh_helper.c
+ :export:
+
+HDCP Helper Functions Reference
+===============================
+
+.. kernel-doc:: drivers/gpu/drm/drm_hdcp.c
+ :export:
+
Display Port Helper Functions Reference
=======================================
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 54a696d961a7..c8ebd4f66a6a 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -79,7 +79,6 @@ count for the TTM, which will call your initialization function.
See the radeon_ttm.c file for an example of usage.
-
The Graphics Execution Manager (GEM)
====================================
@@ -380,6 +379,39 @@ GEM CMA Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
:export:
+VRAM Helper Function Reference
+==============================
+
+.. kernel-doc:: drivers/gpu/drm/drm_vram_helper_common.c
+ :doc: overview
+
+.. kernel-doc:: include/drm/drm_gem_vram_helper.h
+ :internal:
+
+GEM VRAM Helper Functions Reference
+-----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_vram_helper.c
+ :doc: overview
+
+.. kernel-doc:: include/drm/drm_gem_vram_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_vram_helper.c
+ :export:
+
+VRAM MM Helper Functions Reference
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_vram_mm_helper.c
+ :doc: overview
+
+.. kernel-doc:: include/drm/drm_vram_mm_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_vram_mm_helper.c
+ :export:
+
VMA Offset Manager
==================
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index c9fd23efd957..94f90521f58c 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -85,16 +85,18 @@ leads to a few additional requirements:
- The userspace side must be fully reviewed and tested to the standards of that
userspace project. For e.g. mesa this means piglit testcases and review on the
mailing list. This is again to ensure that the new interface actually gets the
- job done.
+ job done. The userspace-side reviewer should also provide an Acked-by on the
+ kernel uAPI patch indicating that they believe the proposed uAPI is sound and
+ sufficiently documented and validated for userspace's consumption.
- The userspace patches must be against the canonical upstream, not some vendor
fork. This is to make sure that no one cheats on the review and testing
requirements by doing a quick fork.
- The kernel patch can only be merged after all the above requirements are met,
- but it **must** be merged **before** the userspace patches land. uAPI always flows
- from the kernel, doing things the other way round risks divergence of the uAPI
- definitions and header files.
+ but it **must** be merged to either drm-next or drm-misc-next **before** the
+ userspace patches land. uAPI always flows from the kernel, doing things the
+ other way round risks divergence of the uAPI definitions and header files.
These are fairly steep requirements, but have grown out from years of shared
pain and experience with uAPI added hastily, and almost always regretted about
@@ -327,3 +329,12 @@ DRM_IOCTL_MODESET_CTL
mode setting, since on many devices the vertical blank counter is
reset to 0 at some point during modeset. Modern drivers should not
call this any more since with kernel mode setting it is a no-op.
+
+Userspace API Structures
+========================
+
+.. kernel-doc:: include/uapi/drm/drm_mode.h
+ :doc: overview
+
+.. kernel-doc:: include/uapi/drm/drm_mode.h
+ :internal:
diff --git a/Documentation/gpu/mcde.rst b/Documentation/gpu/mcde.rst
new file mode 100644
index 000000000000..c69e977defda
--- /dev/null
+++ b/Documentation/gpu/mcde.rst
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================================
+ drm/mcde ST-Ericsson MCDE Multi-channel display engine
+=======================================================
+
+.. kernel-doc:: drivers/gpu/drm/mcde/mcde_drv.c
+ :doc: ST-Ericsson MCDE DRM Driver
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 1528ad2d598b..b4a76c2703e5 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -10,25 +10,6 @@ graphics subsystem useful as newbie projects. Or for slow rainy days.
Subsystem-wide refactorings
===========================
-De-midlayer drivers
--------------------
-
-With the recent ``drm_bus`` cleanup patches for 3.17 it is no longer required
-to have a ``drm_bus`` structure set up. Drivers can directly set up the
-``drm_device`` structure instead of relying on bus methods in ``drm_usb.c``
-and ``drm_pci.c``. The goal is to get rid of the driver's ``->load`` /
-``->unload`` callbacks and open-code the load/unload sequence properly, using
-the new two-stage ``drm_device`` setup/teardown.
-
-Once all existing drivers are converted we can also remove those bus support
-files for USB and platform devices.
-
-All you need is a GPU for a non-converted driver (currently almost all of
-them, but also all the virtual ones used by KVM, so everyone qualifies).
-
-Contact: Daniel Vetter, Thierry Reding, respective driver maintainers
-
-
Remove custom dumb_map_offset implementations
---------------------------------------------
@@ -300,6 +281,21 @@ it to use drm_mode_hsync() instead.
Contact: Sean Paul
+drm_fb_helper tasks
+-------------------
+
+- drm_fb_helper_restore_fbdev_mode_unlocked() should call restore_fbdev_mode()
+ not the _force variant so it can bail out if there is a master. But first
+ these igt tests need to be fixed: kms_fbcon_fbt@psr and
+ kms_fbcon_fbt@psr-suspend.
+
+- The max connector argument for drm_fb_helper_init() and
+ drm_fb_helper_fbdev_setup() isn't used anymore and can be removed.
+
+- The helper doesn't keep an array of connectors anymore so these can be
+ removed: drm_fb_helper_single_add_all_connectors(),
+ drm_fb_helper_add_one_connector() and drm_fb_helper_remove_one_connector().
+
Core refactorings
=================
@@ -488,5 +484,20 @@ i915
device_link_add to model the dependency between i915 and snd_had. See
https://dri.freedesktop.org/docs/drm/driver-api/device_link.html
+Bootsplash
+==========
+
+There is support in place now for writing internal DRM clients, making it
+possible to pick up the bootsplash work that was rejected because it was
+written for fbdev.
+
+- [v6,8/8] drm/client: Hack: Add bootsplash example
+ https://patchwork.freedesktop.org/patch/306579/
+
+- [RFC PATCH v2 00/13] Kernel based bootsplash
+ https://lkml.org/lkml/2017/12/13/764
+
+Contact: Sam Ravnborg
+
Outside DRM
===========
diff --git a/MAINTAINERS b/MAINTAINERS
index 57f496cff999..2abf6d28db64 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5140,6 +5140,13 @@ S: Maintained
F: drivers/gpu/drm/tinydrm/st7735r.c
F: Documentation/devicetree/bindings/display/sitronix,st7735r.txt
+DRM DRIVER FOR ST-ERICSSON MCDE
+M: Linus Walleij <linus.walleij@linaro.org>
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Maintained
+F: drivers/gpu/drm/mcde/
+F: Documentation/devicetree/bindings/display/ste,mcde.txt
+
DRM DRIVER FOR TDFX VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/tdfx/
@@ -5425,6 +5432,7 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
DRM PANEL DRIVERS
M: Thierry Reding <thierry.reding@gmail.com>
+R: Sam Ravnborg <sam@ravnborg.org>
L: dri-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
@@ -5453,7 +5461,6 @@ F: Documentation/gpu/xen-front.rst
DRM TTM SUBSYSTEM
M: Christian Koenig <christian.koenig@amd.com>
M: Huang Rui <ray.huang@amd.com>
-M: Junwei Zhang <Jerry.Zhang@amd.com>
T: git git://people.freedesktop.org/~agd5f/linux
S: Maintained
L: dri-devel@lists.freedesktop.org
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 7c858020d14b..f4104a21b069 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -90,6 +90,10 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
dmabuf = file->private_data;
+ /* check if buffer supports mmap */
+ if (!dmabuf->ops->mmap)
+ return -EINVAL;
+
/* check for overflowing the buffer's size */
if (vma->vm_pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
@@ -404,8 +408,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
- || !exp_info->ops->release
- || !exp_info->ops->mmap)) {
+ || !exp_info->ops->release)) {
return ERR_PTR(-EINVAL);
}
@@ -573,6 +576,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
list_add(&attach->node, &dmabuf->attachments);
mutex_unlock(&dmabuf->lock);
+
return attach;
err_attach:
@@ -595,6 +599,9 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (WARN_ON(!dmabuf || !attach))
return;
+ if (attach->sgt)
+ dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+
mutex_lock(&dmabuf->lock);
list_del(&attach->node);
if (dmabuf->ops->detach)
@@ -630,10 +637,27 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
+ if (attach->sgt) {
+ /*
+ * Two mappings with different directions for the same
+ * attachment are not allowed.
+ */
+ if (attach->dir != direction &&
+ attach->dir != DMA_BIDIRECTIONAL)
+ return ERR_PTR(-EBUSY);
+
+ return attach->sgt;
+ }
+
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+ attach->sgt = sg_table;
+ attach->dir = direction;
+ }
+
return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@@ -657,8 +681,10 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
- direction);
+ if (attach->sgt == sg_table)
+ return;
+
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
@@ -906,6 +932,10 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
+ /* check if buffer supports mmap */
+ if (!dmabuf->ops->mmap)
+ return -EINVAL;
+
/* check for offset overflow */
if (pgoff + vma_pages(vma) < pgoff)
return -EOVERFLOW;
@@ -1068,6 +1098,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
dma_fence_is_signaled(fence) ? "" : "un");
+ dma_fence_put(fence);
}
rcu_read_unlock();
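To illustrate the cached-mapping behaviour from the importer side, a minimal
hypothetical flow (a sketch only, not part of this patch):

	#include <linux/dma-buf.h>
	#include <linux/err.h>

	static int import_and_map(struct dma_buf *dmabuf, struct device *dev)
	{
		struct dma_buf_attachment *attach;
		struct sg_table *sgt;

		attach = dma_buf_attach(dmabuf, dev);
		if (IS_ERR(attach))
			return PTR_ERR(attach);

		/*
		 * If the exporter sets ops->cache_sgt_mapping, the first
		 * successful map is stored in attach->sgt; later calls with
		 * a compatible direction return the cached table directly.
		 */
		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt)) {
			dma_buf_detach(dmabuf, attach);
			return PTR_ERR(sgt);
		}

		/* ... program the device using sgt ... */

		/* For a cached sgt this returns early; detach unmaps it. */
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
		dma_buf_detach(dmabuf, attach);
		return 0;
	}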
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 227a19476d56..59ac96ec7ba8 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -248,8 +248,25 @@ void dma_fence_release(struct kref *kref)
trace_dma_fence_destroy(fence);
- /* Failed to signal before release, could be a refcounting issue */
- WARN_ON(!list_empty(&fence->cb_list));
+ if (WARN(!list_empty(&fence->cb_list),
+ "Fence %s:%s:%llx:%llx released with pending signals!\n",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence),
+ fence->context, fence->seqno)) {
+ unsigned long flags;
+
+ /*
+ * Failed to signal before release, likely a refcounting issue.
+ *
+ * This should never happen, but if it does make sure that we
+ * don't leave chains dangling. We set the error flag first
+ * so that the callbacks know this signal is due to an error.
+ */
+ spin_lock_irqsave(fence->lock, flags);
+ fence->error = -EDEADLK;
+ dma_fence_signal_locked(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
+ }
if (fence->ops->release)
fence->ops->release(fence);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 4d32e2c67862..4447e13d1e89 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -365,6 +365,10 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
GFP_NOWAIT | __GFP_NOWARN);
if (!nshared) {
rcu_read_unlock();
+
+ dma_fence_put(fence_excl);
+ fence_excl = NULL;
+
nshared = krealloc(shared, sz, GFP_KERNEL);
if (nshared) {
shared = nshared;
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 3bb462cfb06c..101394f16930 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -188,29 +188,3 @@ static __init int sync_debugfs_init(void)
return 0;
}
late_initcall(sync_debugfs_init);
-
-#define DUMP_CHUNK 256
-static char sync_dump_buf[64 * 1024];
-void sync_dump(void)
-{
- struct seq_file s = {
- .buf = sync_dump_buf,
- .size = sizeof(sync_dump_buf) - 1,
- };
- int i;
-
- sync_info_debugfs_show(&s, NULL);
-
- for (i = 0; i < s.count; i += DUMP_CHUNK) {
- if ((s.count - i) > DUMP_CHUNK) {
- char c = s.buf[i + DUMP_CHUNK];
-
- s.buf[i + DUMP_CHUNK] = 0;
- pr_cont("%s", s.buf + i);
- s.buf[i + DUMP_CHUNK] = c;
- } else {
- s.buf[s.count] = 0;
- pr_cont("%s", s.buf + i);
- }
- }
-}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index 05e33f937ad0..6176e52ba2d7 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -68,6 +68,5 @@ void sync_timeline_debug_add(struct sync_timeline *obj);
void sync_timeline_debug_remove(struct sync_timeline *obj);
void sync_file_debug_add(struct sync_file *fence);
void sync_file_debug_remove(struct sync_file *fence);
-void sync_dump(void);
#endif /* _LINUX_SYNC_H */
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 36f900d63979..6b34949416b1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -161,6 +161,13 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_VRAM_HELPER
+ tristate
+ depends on DRM
+ select DRM_TTM
+ help
+ Helpers for VRAM memory management
+
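A driver wanting these helpers selects the (non-user-visible) symbol from its
own Kconfig entry, for instance (hypothetical driver shown):

	config DRM_FOO
		tristate "Foo display driver"
		depends on DRM
		select DRM_VRAM_HELPER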
config DRM_GEM_CMA_HELPER
bool
depends on DRM
@@ -343,6 +350,8 @@ source "drivers/gpu/drm/panfrost/Kconfig"
source "drivers/gpu/drm/aspeed/Kconfig"
+source "drivers/gpu/drm/mcde/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 72f5036d9bfa..9d630a28a788 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -17,7 +17,7 @@ drm-y := drm_auth.o drm_cache.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
- drm_atomic_uapi.o
+ drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o
drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
@@ -32,13 +32,18 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_vram_helper-y := drm_gem_vram_helper.o \
+ drm_vram_helper_common.o \
+ drm_vram_mm_helper.o
+obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
+
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o \
drm_atomic_state_helper.o drm_damage_helper.o \
- drm_format_helper.o
+ drm_format_helper.o drm_self_refresh_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
@@ -113,3 +118,4 @@ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
+obj-$(CONFIG_DRM_MCDE) += mcde/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 844f0a162981..a04f2fc7bf37 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -27,10 +27,11 @@ config DRM_AMDGPU_CIK
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
depends on DRM_AMDGPU
- select MMU_NOTIFIER
+ depends on ARCH_HAS_HMM
+ select HMM_MIRROR
help
- This option selects CONFIG_MMU_NOTIFIER if it isn't already
- selected to enabled full userptr support.
+ This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if they
+ aren't already selected, to enable full userptr support.
config DRM_AMDGPU_GART_DEBUGFS
bool "Allow GART access through debugfs"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index fdd0ca4b0f0b..57ce44cc3226 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -49,7 +49,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
- amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
+ amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
@@ -173,7 +173,7 @@ endif
amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
-amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
+amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o
include $(FULL_AMD_PATH)/powerplay/Makefile
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 14398f55f602..cbcd253d18d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -44,9 +44,9 @@
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
#include <drm/gpu_scheduler.h>
#include <kgd_kfd_interface.h>
@@ -118,7 +118,6 @@ extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
-extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
@@ -211,6 +210,7 @@ struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct amdgpu_atif;
+struct kfd_vm_fault_info;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -415,6 +415,7 @@ struct amdgpu_fpriv {
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
+int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev);
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
@@ -558,6 +559,8 @@ struct amdgpu_asic_funcs {
uint64_t *count1);
/* do we need to reset the asic at init time (e.g., kexec) */
bool (*need_reset_on_init)(struct amdgpu_device *adev);
+ /* PCIe replay counter */
+ uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
};
/*
@@ -639,6 +642,11 @@ struct nbio_hdp_flush_reg {
u32 ref_and_mask_sdma1;
};
+struct amdgpu_mmio_remap {
+ u32 reg_offset;
+ resource_size_t bus_addr;
+};
+
struct amdgpu_nbio_funcs {
const struct nbio_hdp_flush_reg *hdp_flush_reg;
u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
@@ -666,6 +674,7 @@ struct amdgpu_nbio_funcs {
void (*ih_control)(struct amdgpu_device *adev);
void (*init_registers)(struct amdgpu_device *adev);
void (*detect_hw_virt)(struct amdgpu_device *adev);
+ void (*remap_hdp_registers)(struct amdgpu_device *adev);
};
struct amdgpu_df_funcs {
@@ -680,6 +689,12 @@ struct amdgpu_df_funcs {
u32 *flags);
void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
bool enable);
+ int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
+ int is_enable);
+ int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
+ int is_disable);
+ void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
+ uint64_t *count);
};
/* Define the HW IP blocks will be used in driver , add more if necessary */
enum amd_hw_ip_block_type {
@@ -764,6 +779,7 @@ struct amdgpu_device {
void __iomem *rmmio;
/* protects concurrent MM_INDEX/DATA based register access */
spinlock_t mmio_idx_lock;
+ struct amdgpu_mmio_remap rmmio_remap;
/* protects concurrent SMC based register access */
spinlock_t smc_idx_lock;
amdgpu_rreg_t smc_rreg;
@@ -906,7 +922,7 @@ struct amdgpu_device {
const struct amdgpu_df_funcs *df_funcs;
/* delayed work_func for deferring clockgating during resume */
- struct delayed_work late_init_work;
+ struct delayed_work delayed_init_work;
struct amdgpu_virt virt;
/* firmware VRAM reservation */
@@ -936,6 +952,13 @@ struct amdgpu_device {
struct work_struct xgmi_reset_work;
bool in_baco_reset;
+
+ long gfx_timeout;
+ long sdma_timeout;
+ long video_timeout;
+ long compute_timeout;
+
+ uint64_t unique_id;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1065,6 +1088,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
+#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
/* Common functions */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1081,6 +1105,9 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 array_size);
bool amdgpu_device_is_px(struct drm_device *dev);
+bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev);
+
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 0a4fba196b84..eba42c752bca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -24,6 +24,7 @@
*/
#include <linux/irqdomain.h>
+#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 56f8ca2a3bb4..1e41367ef74e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -27,7 +27,7 @@
#include <linux/power_supply.h>
#include <linux/pm_runtime.h>
#include <acpi/video.h>
-#include <drm/drmP.h>
+
#include <drm/drm_crtc_helper.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
index 3889486f71fe..a4d65973bf7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
@@ -25,7 +25,7 @@
*/
#include <linux/hdmi.h>
#include <linux/gcd.h>
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index aeead072fa79..c8887a1c852a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -22,11 +22,13 @@
#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_gfx.h"
+#include "amdgpu_dma_buf.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
+#include "amdgpu_xgmi.h"
static const unsigned int compute_vmid_bitmap = 0xFF00;
@@ -148,7 +150,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
};
/* this is going to have a few of the MSBs set that we need to
- * clear */
+ * clear
+ */
bitmap_complement(gpu_resources.queue_bitmap,
adev->gfx.mec.queue_bitmap,
KGD_MAX_QUEUES);
@@ -162,7 +165,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
gpu_resources.queue_bitmap);
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
- * nbits is not compile time constant */
+ * nbits is not compile time constant
+ */
last_valid_bit = 1 /* only first MEC can have compute queues */
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
@@ -335,6 +339,40 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
amdgpu_bo_unref(&(bo));
}
+int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
+ void **mem_obj)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo_param bp;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = 1;
+ bp.domain = AMDGPU_GEM_DOMAIN_GWS;
+ bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ bp.type = ttm_bo_type_device;
+ bp.resv = NULL;
+
+ r = amdgpu_bo_create(adev, &bp, &bo);
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate gws BO for amdkfd (%d)\n", r);
+ return r;
+ }
+
+ *mem_obj = bo;
+ return 0;
+}
+
+void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
+{
+ struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;
+
+ amdgpu_bo_unref(&bo);
+}
+
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
enum kgd_engine_type type)
{
@@ -518,6 +556,34 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
return adev->gmc.xgmi.hive_id;
}
+uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
+{
+ struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
+ struct amdgpu_device *adev = (struct amdgpu_device *)dst;
+ int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
+
+ if (ret < 0) {
+ DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
+ adev->gmc.xgmi.physical_node_id,
+ peer_adev->gmc.xgmi.physical_node_id, ret);
+ ret = 0;
+ }
+ return (uint8_t)ret;
+}
+
+uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->rmmio_remap.bus_addr;
+}
+
+uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->gds.gws_size;
+}
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
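
The new GWS helpers above are meant to be used as a pair. A minimal, hypothetical caller on the KFD side might look like this (kgd is the kfd2kgd device handle, and the size would normally be the value reported by amdgpu_amdkfd_get_num_gws()):

    /* Hypothetical pairing of the new helpers; error handling mirrors
     * the allocation path above.
     */
    void *gws_obj;
    int r;

    r = amdgpu_amdkfd_alloc_gws(kgd, amdgpu_amdkfd_get_num_gws(kgd), &gws_obj);
    if (r)
        return r;
    /* ... expose the BO to a process via amdgpu_amdkfd_add_gws_to_process() ... */
    amdgpu_amdkfd_free_gws(kgd, gws_obj);
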
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 4e37fa7e85b1..f968bf147c5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -61,7 +61,6 @@ struct kgd_mem {
atomic_t invalid;
struct amdkfd_process_info *process_info;
- struct page **user_pages;
struct amdgpu_sync sync;
@@ -154,6 +153,10 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj);
+void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj);
+int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
+int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
@@ -169,6 +172,9 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
+uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
+uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
+uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
#define read_user_wptr(mmptr, wptr, dst) \
({ \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index fa09e11a600c..5f459bf5f622 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -23,7 +23,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/mmu_context.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
@@ -310,7 +310,7 @@ static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
- pr_debug("kfd: sdma base address: 0x%x\n", retval);
+ pr_debug("sdma base address: 0x%x\n", retval);
return retval;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index fec3a6aa1de6..6d2f61449606 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -24,7 +24,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/mmu_context.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gfx_v8_0.h"
@@ -266,7 +266,7 @@ static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
- pr_debug("kfd: sdma base address: 0x%x\n", retval);
+ pr_debug("sdma base address: 0x%x\n", retval);
return retval;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index ef3d93b995b2..85395f2d83a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -26,7 +26,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/mmu_context.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "soc15_hw_ip.h"
@@ -225,8 +225,8 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
lock_srbm(kgd, 0, 0, 0, vmid);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
/* APE1 no longer exists on GFX9 */
unlock_srbm(kgd);
@@ -369,7 +369,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
((mec << 5) | (pipe << 3) | queue_id | 0x80));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
}
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
@@ -378,13 +378,13 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
for (reg = hqd_base;
reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
- WREG32(reg, mqd_hqd[reg - hqd_base]);
+ WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
/* Activate doorbell logic before triggering WPTR poll. */
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
if (wptr) {
/* Don't read wptr with get_user because the user
@@ -413,25 +413,25 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
lower_32_bits(guessed_wptr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
upper_32_bits(guessed_wptr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
lower_32_bits((uintptr_t)wptr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uintptr_t)wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
REG_SET_FIELD(m->cp_hqd_eop_rptr,
CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
release_queue(kgd);
@@ -633,7 +633,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
acquire_queue(kgd, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
- WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
+ WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
switch (reset_type) {
case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
@@ -647,7 +647,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
break;
}
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
@@ -726,29 +726,8 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- /* Use legacy mode tlb invalidation.
- *
- * Currently on Raven the code below is broken for anything but
- * legacy mode due to a MMHUB power gating problem. A workaround
- * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
- * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
- * bit.
- *
- * TODO 1: agree on the right set of invalidation registers for
- * KFD use. Use the last one for now. Invalidate both GC and
- * MMHUB.
- *
- * TODO 2: support range-based invalidation, requires kfg2kgd
- * interface change
- */
- amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
-}
-
-static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
+static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
+ uint32_t flush_type)
{
signed long r;
uint32_t seq;
@@ -761,7 +740,7 @@ static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
PACKET3_INVALIDATE_TLBS_PASID(pasid) |
- PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(0)); /* legacy */
+ PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
@@ -780,12 +759,16 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
int vmid;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ uint32_t flush_type = 0;
if (adev->in_gpu_reset)
return -EIO;
+ if (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20)
+ flush_type = 2;
if (ring->sched.ready)
- return invalidate_tlbs_with_kiq(adev, pasid);
+ return invalidate_tlbs_with_kiq(adev, pasid, flush_type);
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
@@ -793,7 +776,8 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
== pasid) {
- write_vmid_invalidate_request(kgd, vmid);
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+ flush_type);
break;
}
}
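
The flush_type plumbed through here selects the gfx9 invalidation mode: 0 is the legacy flush, while Vega20 with XGMI enabled asks for the heavy-weight flush. The labels below are descriptive stand-ins for the numeric values encoded into the PACKET3_INVALIDATE_TLBS packet, not identifiers from this patch:

    /* Descriptive sketch of the gfx9 TLB flush types used above. */
    enum tlb_flush_type_sketch {
        FLUSH_TYPE_LEGACY      = 0,  /* default path */
        FLUSH_TYPE_LIGHTWEIGHT = 1,
        FLUSH_TYPE_HEAVYWEIGHT = 2,  /* Vega20 + XGMI, as set above */
    };
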
@@ -811,7 +795,22 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
return 0;
}
- write_vmid_invalidate_request(kgd, vmid);
+ /* Use legacy mode tlb invalidation.
+ *
+ * Currently on Raven the code below is broken for anything but
+ * legacy mode due to a MMHUB power gating problem. A workaround
+ * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
+ * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
+ * bit.
+ *
+ * TODO 1: agree on the right set of invalidation registers for
+ * KFD use. Use the last one for now. Invalidate both GC and
+ * MMHUB.
+ *
+ * TODO 2: support range-based invalidation, requires kfg2kgd
+ * interface change
+ */
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
return 0;
}
@@ -838,7 +837,7 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
mutex_lock(&adev->grbm_idx_mutex);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
+ WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
@@ -848,7 +847,7 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
SE_BROADCAST_WRITES, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
+ WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
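
The WREG32 → WREG32_RLC and WREG32_SOC15_RLC_SHADOW conversions throughout this file route writes to RLC-protected GC registers through the RLC whenever direct MMIO access is not permitted, e.g. under SR-IOV. As a rough mental model only (the real macros are more involved; rlc_assisted_wreg() below is a placeholder, not a kernel API):

    /* Rough model of the WREG32_RLC idea: take the RLC-assisted path when
     * the platform forbids direct writes, else fall back to plain MMIO.
     */
    static void wreg32_rlc_model(struct amdgpu_device *adev, u32 reg, u32 v)
    {
        if (amdgpu_sriov_vf(adev))
            rlc_assisted_wreg(adev, reg, v);  /* placeholder for RLC path */
        else
            WREG32(reg, v);                   /* direct MMIO write */
    }
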
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a6e5184d436c..df26bf34b675 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -22,14 +22,16 @@
#define pr_fmt(fmt) "kfd2kgd: " fmt
+#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
-#include <linux/dma-buf.h>
-#include <drm/drmP.h>
+#include <linux/sched/task.h>
+
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_dma_buf.h"
/* Special VM and GART address alignment needed for VI pre-Fiji due to
* a HW bug.
@@ -456,6 +458,17 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
mutex_unlock(&process_info->lock);
}
+static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
+ struct amdkfd_process_info *process_info)
+{
+ struct ttm_validate_buffer *bo_list_entry;
+
+ bo_list_entry = &mem->validate_list;
+ mutex_lock(&process_info->lock);
+ list_del(&bo_list_entry->head);
+ mutex_unlock(&process_info->lock);
+}
+
/* Initializes user pages. It registers the MMU notifier and validates
* the userptr BO in the GTT domain.
*
@@ -491,28 +504,12 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
goto out;
}
- /* If no restore worker is running concurrently, user_pages
- * should not be allocated
- */
- WARN(mem->user_pages, "Leaking user_pages array");
-
- mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
- if (!mem->user_pages) {
- pr_err("%s: Failed to allocate pages array\n", __func__);
- ret = -ENOMEM;
- goto unregister_out;
- }
-
- ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
+ ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);
if (ret) {
pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
- goto free_out;
+ goto unregister_out;
}
- amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);
-
ret = amdgpu_bo_reserve(bo, true);
if (ret) {
pr_err("%s: Failed to reserve BO\n", __func__);
@@ -525,11 +522,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
amdgpu_bo_unreserve(bo);
release_out:
- if (ret)
- release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
-free_out:
- kvfree(mem->user_pages);
- mem->user_pages = NULL;
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
if (ret)
amdgpu_mn_unregister(bo);
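
After this rework userptr pages live directly in bo->tbo.ttm->pages, and the driver follows an acquire/release discipline around them. A condensed sketch of the convention used throughout this series (not a literal excerpt):

    /* Every successful amdgpu_ttm_tt_get_user_pages() is balanced by an
     * amdgpu_ttm_tt_get_user_pages_done() once the pages have been
     * consumed (reserved, validated and bound) or the attempt failed.
     */
    r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);
    if (r)
        goto unregister_out;

    /* ... reserve the BO and validate it in the GTT domain ... */

    amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
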
@@ -588,13 +581,12 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
ctx->kfd_bo.tv.num_shared = 1;
- ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates);
+ false, &ctx->duplicates, true);
if (!ret)
ctx->reserved = true;
else {
@@ -652,7 +644,6 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
ctx->kfd_bo.tv.num_shared = 1;
- ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
i = 0;
@@ -668,7 +659,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
}
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates);
+ false, &ctx->duplicates, true);
if (!ret)
ctx->reserved = true;
else
@@ -896,6 +887,9 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
+ ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
+ if (ret)
+ goto reserve_shared_fail;
amdgpu_bo_fence(vm->root.base.bo,
&vm->process_info->eviction_fence->base, true);
amdgpu_bo_unreserve(vm->root.base.bo);
@@ -909,6 +903,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
return 0;
+reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
amdgpu_bo_unreserve(vm->root.base.bo);
@@ -1109,7 +1104,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (!offset || !*offset)
return -EINVAL;
user_addr = *offset;
- } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+ } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
+ ALLOC_MEM_FLAGS_MMIO_REMAP)) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
bo_type = ttm_bo_type_sg;
@@ -1199,12 +1195,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (user_addr) {
ret = init_user_pages(*mem, current->mm, user_addr);
- if (ret) {
- mutex_lock(&avm->process_info->lock);
- list_del(&(*mem)->validate_list.head);
- mutex_unlock(&avm->process_info->lock);
+ if (ret)
goto allocate_init_user_pages_failed;
- }
}
if (offset)
@@ -1213,6 +1205,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
return 0;
allocate_init_user_pages_failed:
+ remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
amdgpu_bo_unref(&bo);
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
@@ -1262,15 +1255,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
list_del(&bo_list_entry->head);
mutex_unlock(&process_info->lock);
- /* Free user pages if necessary */
- if (mem->user_pages) {
- pr_debug("%s: Freeing user_pages array\n", __func__);
- if (mem->user_pages[0])
- release_pages(mem->user_pages,
- mem->bo->tbo.ttm->num_pages);
- kvfree(mem->user_pages);
- }
-
ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
if (unlikely(ret))
return ret;
@@ -1294,8 +1278,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
/* Free the sync object */
amdgpu_sync_free(&mem->sync);
- /* If the SG is not NULL, it's one we created for a doorbell
- * BO. We need to free it.
+ /* If the SG is not NULL, it's one we created for a doorbell or mmio
+ * remap BO. We need to free it.
*/
if (mem->bo->tbo.sg) {
sg_free_table(mem->bo->tbo.sg);
@@ -1409,7 +1393,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
is_invalid_userptr);
if (ret) {
- pr_err("Failed to map radeon bo to gpuvm\n");
+ pr_err("Failed to map bo to gpuvm\n");
goto map_bo_to_gpuvm_failed;
}
@@ -1744,25 +1728,11 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo;
- if (!mem->user_pages) {
- mem->user_pages =
- kvmalloc_array(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
- if (!mem->user_pages) {
- pr_err("%s: Failed to allocate pages array\n",
- __func__);
- return -ENOMEM;
- }
- } else if (mem->user_pages[0]) {
- release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
- }
-
/* Get updated user pages */
ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
- mem->user_pages);
+ bo->tbo.ttm->pages);
if (ret) {
- mem->user_pages[0] = NULL;
+ bo->tbo.ttm->pages[0] = NULL;
pr_info("%s: Failed to get user pages: %d\n",
__func__, ret);
/* Pretend it succeeded. It will fail later
@@ -1771,17 +1741,28 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
* stalled user mode queues.
*/
}
-
- /* Mark the BO as valid unless it was invalidated
- * again concurrently
- */
- if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
- return -EAGAIN;
}
return 0;
}
+/* Remove invalid userptr BOs from the HMM track list
+ *
+ * Stop HMM from tracking userptr updates
+ */
+static void untrack_invalid_user_pages(struct amdkfd_process_info *process_info)
+{
+ struct kgd_mem *mem, *tmp_mem;
+ struct amdgpu_bo *bo;
+
+ list_for_each_entry_safe(mem, tmp_mem,
+ &process_info->userptr_inval_list,
+ validate_list.head) {
+ bo = mem->bo;
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ }
+}
+
/* Validate invalid userptr BOs
*
* Validates BOs on the userptr_inval_list, and moves them back to the
@@ -1806,7 +1787,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
GFP_KERNEL);
if (!pd_bo_list_entries) {
pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_no_mem;
}
INIT_LIST_HEAD(&resv_list);
@@ -1827,10 +1809,11 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
/* Reserve all BOs and page tables for validation */
- ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
+ ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
+ true);
WARN(!list_empty(&duplicates), "Duplicates should be empty");
if (ret)
- goto out;
+ goto out_free;
amdgpu_sync_create(&sync);
@@ -1846,10 +1829,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
bo = mem->bo;
- /* Copy pages array and validate the BO if we got user pages */
- if (mem->user_pages[0]) {
- amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
- mem->user_pages);
+ /* Validate the BO if we got user pages */
+ if (bo->tbo.ttm->pages[0]) {
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret) {
@@ -1858,16 +1839,16 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
}
- /* Validate succeeded, now the BO owns the pages, free
- * our copy of the pointer array. Put this BO back on
- * the userptr_valid_list. If we need to revalidate
- * it, we need to start from scratch.
- */
- kvfree(mem->user_pages);
- mem->user_pages = NULL;
list_move_tail(&mem->validate_list.head,
&process_info->userptr_valid_list);
+ /* Stop HMM from tracking the userptr update. We don't check the
+ * return value for a concurrent CPU page table update because we
+ * will reschedule the restore worker if process_info->evicted_bos
+ * is updated.
+ */
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+
/* Update mapping. If the BO was not validated
* (because we couldn't get user pages), this will
* clear the page table entries, which will result in
@@ -1897,8 +1878,9 @@ unreserve_out:
ttm_eu_backoff_reservation(&ticket, &resv_list);
amdgpu_sync_wait(&sync, false);
amdgpu_sync_free(&sync);
-out:
+out_free:
kfree(pd_bo_list_entries);
+out_no_mem:
return ret;
}
@@ -1963,7 +1945,9 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
* hanging. No point trying again.
*/
}
+
unlock_out:
+ untrack_invalid_user_pages(process_info);
mutex_unlock(&process_info->lock);
mmput(mm);
put_task_struct(usertask);
@@ -2032,7 +2016,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
- false, &duplicate_save);
+ false, &duplicate_save, true);
if (ret) {
pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
goto ttm_reserve_fail;
@@ -2130,3 +2114,88 @@ ttm_reserve_fail:
kfree(pd_bo_list);
return ret;
}
+
+int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
+{
+ struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
+ struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
+ int ret;
+
+ if (!info || !gws)
+ return -EINVAL;
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem)
+ return -ENOMEM;
+
+ mutex_init(&(*mem)->lock);
+ (*mem)->bo = amdgpu_bo_ref(gws_bo);
+ (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
+ (*mem)->process_info = process_info;
+ add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
+ amdgpu_sync_create(&(*mem)->sync);
+
+
+ /* Validate the GWS BO the first time it is added to the process */
+ mutex_lock(&(*mem)->process_info->lock);
+ ret = amdgpu_bo_reserve(gws_bo, false);
+ if (unlikely(ret)) {
+ pr_err("Reserve gws bo failed %d\n", ret);
+ goto bo_reservation_failure;
+ }
+
+ ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
+ if (ret) {
+ pr_err("GWS BO validate failed %d\n", ret);
+ goto bo_validation_failure;
+ }
+ /* The GWS resource is shared between amdgpu and amdkfd.
+ * Add the process eviction fence to the BO so that they can
+ * evict each other.
+ */
+ amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
+ amdgpu_bo_unreserve(gws_bo);
+ mutex_unlock(&(*mem)->process_info->lock);
+
+ return ret;
+
+bo_validation_failure:
+ amdgpu_bo_unreserve(gws_bo);
+bo_reservation_failure:
+ mutex_unlock(&(*mem)->process_info->lock);
+ amdgpu_sync_free(&(*mem)->sync);
+ remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
+ amdgpu_bo_unref(&gws_bo);
+ mutex_destroy(&(*mem)->lock);
+ kfree(*mem);
+ *mem = NULL;
+ return ret;
+}
+
+int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
+{
+ int ret;
+ struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
+ struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
+ struct amdgpu_bo *gws_bo = kgd_mem->bo;
+
+ /* Remove BO from process's validate list so restore worker won't touch
+ * it anymore
+ */
+ remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
+
+ ret = amdgpu_bo_reserve(gws_bo, false);
+ if (unlikely(ret)) {
+ pr_err("Reserve gws bo failed %d\n", ret);
+ //TODO add BO back to validate_list?
+ return ret;
+ }
+ amdgpu_amdkfd_remove_eviction_fence(gws_bo,
+ process_info->eviction_fence);
+ amdgpu_bo_unreserve(gws_bo);
+ amdgpu_sync_free(&kgd_mem->sync);
+ amdgpu_bo_unref(&gws_bo);
+ mutex_destroy(&kgd_mem->lock);
+ kfree(mem);
+ return 0;
+}
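
amdgpu_amdkfd_add_gws_to_process() and amdgpu_amdkfd_remove_gws_from_process() are likewise designed as a pair. A hypothetical KFD-side sequence, with process_info and gws_bo standing in for the handles KFD actually holds:

    /* Hypothetical caller sketch: attach the GWS BO when a process first
     * requests GWS, detach it on process teardown.
     */
    struct kgd_mem *gws_mem;
    int r;

    r = amdgpu_amdkfd_add_gws_to_process(process_info, gws_bo, &gws_mem);
    if (r)
        return r;
    /* ... user queues may now be programmed to use GWS ... */
    r = amdgpu_amdkfd_remove_gws_from_process(process_info, gws_mem);
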
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index e02781b37e73..1c9d40f97a9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -23,7 +23,7 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index f96d75c6e099..a2dbdf13c4c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 3079ea8523c5..649e68c4479b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -21,7 +21,7 @@
*
* Authors: Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index a5df80d50d44..50dff69a0f6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -25,10 +25,11 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "atom.h"
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/acpi.h>
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 5c79da8e1150..7bcf86c61999 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -28,7 +28,8 @@
* Christian König <deathsimple@vodafone.de>
*/
-#include <drm/drmP.h>
+#include <linux/uaccess.h>
+
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -81,9 +82,9 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
return -ENOMEM;
kref_init(&list->refcount);
- list->gds_obj = adev->gds.gds_gfx_bo;
- list->gws_obj = adev->gds.gws_gfx_bo;
- list->oa_obj = adev->gds.oa_gfx_bo;
+ list->gds_obj = NULL;
+ list->gws_obj = NULL;
+ list->oa_obj = NULL;
array = amdgpu_bo_list_array_entry(list, 0);
memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 7c5f5d1601e6..a130e766cbdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -36,7 +36,7 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo_va *bo_va;
uint32_t priority;
struct page **user_pages;
- int user_invalidated;
+ bool user_invalidated;
};
struct amdgpu_bo_list {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 387f1cf1dc20..031b094607bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -22,8 +22,9 @@
*
*/
#include <linux/list.h>
+#include <linux/pci.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index bf04c12bd324..73b2ede773d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -23,7 +23,7 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2f6239b6be6f..dc63707e426f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -24,9 +24,11 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
+
+#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
@@ -52,7 +54,6 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
p->uf_entry.tv.bo = &bo->tbo;
/* One for TTM and one for the CS job */
p->uf_entry.tv.num_shared = 2;
- p->uf_entry.user_pages = NULL;
drm_gem_object_put_unlocked(gobj);
@@ -542,14 +543,14 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (usermm && usermm != current->mm)
return -EPERM;
- /* Check if we have user pages and nobody bound the BO already */
- if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
- lobj->user_pages) {
+ if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
+ lobj->user_invalidated && lobj->user_pages) {
amdgpu_bo_placement_from_domain(bo,
AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return r;
+
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
lobj->user_pages);
binding_userptr = true;
@@ -580,7 +581,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_bo *gds;
struct amdgpu_bo *gws;
struct amdgpu_bo *oa;
- unsigned tries = 10;
int r;
INIT_LIST_HEAD(&p->validated);
@@ -616,79 +616,45 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
list_add(&p->uf_entry.tv.head, &p->validated);
- while (1) {
- struct list_head need_pages;
-
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
- goto error_free_pages;
- }
-
- INIT_LIST_HEAD(&need_pages);
- amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-
- if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
- &e->user_invalidated) && e->user_pages) {
-
- /* We acquired a page array, but somebody
- * invalidated it. Free it and try again
- */
- release_pages(e->user_pages,
- bo->tbo.ttm->num_pages);
- kvfree(e->user_pages);
- e->user_pages = NULL;
- }
-
- if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
- !e->user_pages) {
- list_del(&e->tv.head);
- list_add(&e->tv.head, &need_pages);
-
- amdgpu_bo_unreserve(bo);
- }
+ /* Get userptr backing pages. If the pages were updated after they were
+ * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
+ * will do amdgpu_ttm_backend_bind() to flush and invalidate the new
+ * pages.
+ */
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ bool userpage_invalidated = false;
+ int i;
+
+ e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
+ sizeof(struct page *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!e->user_pages) {
+ DRM_ERROR("calloc failure\n");
+ return -ENOMEM;
}
- if (list_empty(&need_pages))
- break;
-
- /* Unreserve everything again. */
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-
- /* We tried too many times, just abort */
- if (!--tries) {
- r = -EDEADLK;
- DRM_ERROR("deadlock in %s\n", __func__);
- goto error_free_pages;
+ r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages);
+ if (r) {
+ kvfree(e->user_pages);
+ e->user_pages = NULL;
+ return r;
}
- /* Fill the page arrays for all userptrs. */
- list_for_each_entry(e, &need_pages, tv.head) {
- struct ttm_tt *ttm = e->tv.bo->ttm;
-
- e->user_pages = kvmalloc_array(ttm->num_pages,
- sizeof(struct page*),
- GFP_KERNEL | __GFP_ZERO);
- if (!e->user_pages) {
- r = -ENOMEM;
- DRM_ERROR("calloc failure in %s\n", __func__);
- goto error_free_pages;
- }
-
- r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
- if (r) {
- DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
- kvfree(e->user_pages);
- e->user_pages = NULL;
- goto error_free_pages;
+ for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
+ if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
+ userpage_invalidated = true;
+ break;
}
}
+ e->user_invalidated = userpage_invalidated;
+ }
- /* And try again. */
- list_splice(&need_pages, &p->validated);
+ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
+ &duplicates, true);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
+ goto out;
}
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -757,17 +723,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
error_validate:
if (r)
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-
-error_free_pages:
-
- amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- if (!e->user_pages)
- continue;
-
- release_pages(e->user_pages, e->tv.bo->ttm->num_pages);
- kvfree(e->user_pages);
- }
-
+out:
return r;
}
@@ -1054,11 +1010,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
j++;
}
- /* UVD & VCE fw doesn't support user fences */
+ /* MM engine doesn't support user fences */
ring = to_amdgpu_ring(parser->entity->rq->sched);
- if (parser->job->uf_addr && (
- ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+ if (parser->job->uf_addr && ring->funcs->no_user_fence)
return -EINVAL;
return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
@@ -1328,7 +1282,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
-
int r;
job = p->job;
@@ -1338,15 +1291,23 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
if (r)
goto error_unlock;
- /* No memory allocation is allowed while holding the mn lock */
+ /* No memory allocation is allowed while holding the mn lock.
+ * p->mn is held until amdgpu_cs_submit is finished and the fence is
+ * added to the BOs.
+ */
amdgpu_mn_lock(p->mn);
+
+ /* If userptrs were invalidated after amdgpu_cs_parser_bos(), return
+ * -EAGAIN; drmIoctl() in libdrm will restart the amdgpu_cs_ioctl.
+ */
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
- r = -ERESTARTSYS;
- goto error_abort;
- }
+ r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ }
+ if (r) {
+ r = -EAGAIN;
+ goto error_abort;
}
job->owner = p->filp;
@@ -1442,6 +1403,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
+
return r;
}
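
The -EAGAIN contract above relies on how libdrm issues ioctls: drmIoctl() restarts the system call on transient errors, so an -EAGAIN from the CS ioctl transparently re-runs the whole submission, including re-acquiring the userptr pages. A simplified model of that loop (libdrm's drmIoctl() is equivalent in spirit):

    #include <errno.h>
    #include <sys/ioctl.h>

    /* Simplified model of libdrm's drmIoctl(): retry while the kernel
     * reports a restartable condition.
     */
    static int drm_ioctl_model(int fd, unsigned long request, void *arg)
    {
        int ret;

        do {
            ret = ioctl(fd, request, arg);
        } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

        return ret;
    }
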
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 54dd02a898b9..06f83cac0d3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -79,7 +79,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a28a3d722ba2..f539a2a92774 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -22,7 +22,6 @@
* Authors: monk liu <monk.liu@amd.com>
*/
-#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8930d66f2204..f255a00c4492 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -24,8 +24,11 @@
*/
#include <linux/kthread.h>
-#include <drm/drmP.h>
-#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_debugfs.h>
+
#include "amdgpu.h"
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f4ac632a87b2..a65c0661253a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -27,9 +27,10 @@
*/
#include <linux/power_supply.h>
#include <linux/kthread.h>
+#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
@@ -97,6 +98,28 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
+/**
+ * DOC: pcie_replay_count
+ *
+ * The amdgpu driver provides a sysfs API for reporting the total number
+ * of PCIe replays (NAKs).
+ * The file pcie_replay_count is used for this and returns the total
+ * number of replays as the sum of the NAKs generated and the NAKs received.
+ */
+
+static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
+}
+
+static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
+ amdgpu_device_get_pcie_replay_count, NULL);
+
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/**
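
The attribute lands under the PCI device's sysfs directory. A tiny hypothetical userspace check (the card0 index in the path is an assumption and varies per system):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long replays = 0;
        FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");

        if (!f)
            return 1;
        if (fscanf(f, "%llu", &replays) == 1)
            printf("PCIe replays: %llu\n", replays);
        fclose(f);
        return 0;
    }
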
@@ -910,8 +933,10 @@ def_value:
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
-static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
+static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
+ int ret = 0;
+
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
@@ -956,12 +981,15 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_vram_page_split = 1024;
}
- if (amdgpu_lockup_timeout == 0) {
- dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
- amdgpu_lockup_timeout = 10000;
+ ret = amdgpu_device_get_job_timeout_settings(adev);
+ if (ret) {
+ dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
+ return ret;
}
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
+
+ return ret;
}
/**
@@ -1505,12 +1533,26 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return -EAGAIN;
+
+ /* query the reg access mode at the very beginning */
+ amdgpu_virt_init_reg_access_mode(adev);
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ /* Read BIOS */
+ if (!amdgpu_get_bios(adev))
+ return -EINVAL;
+
+ r = amdgpu_atombios_init(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+ return r;
+ }
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d <%s>\n",
@@ -1550,6 +1592,7 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
if (adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+ (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
r = adev->ip_blocks[i].version->funcs->hw_init(adev);
if (r) {
@@ -1821,6 +1864,43 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
return 0;
}
+static int amdgpu_device_enable_mgpu_fan_boost(void)
+{
+ struct amdgpu_gpu_instance *gpu_ins;
+ struct amdgpu_device *adev;
+ int i, ret = 0;
+
+ mutex_lock(&mgpu_info.mutex);
+
+ /*
+ * MGPU fan boost feature should be enabled
+ * only when there are two or more dGPUs in
+ * the system
+ */
+ if (mgpu_info.num_dgpu < 2)
+ goto out;
+
+ for (i = 0; i < mgpu_info.num_dgpu; i++) {
+ gpu_ins = &(mgpu_info.gpu_ins[i]);
+ adev = gpu_ins->adev;
+ if (!(adev->flags & AMD_IS_APU) &&
+ !gpu_ins->mgpu_fan_enabled &&
+ adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
+ ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
+ if (ret)
+ break;
+
+ gpu_ins->mgpu_fan_enabled = 1;
+ }
+ }
+
+out:
+ mutex_unlock(&mgpu_info.mutex);
+
+ return ret;
+}
+
/**
* amdgpu_device_ip_late_init - run late init for hardware IPs
*
@@ -1854,11 +1934,15 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
- queue_delayed_work(system_wq, &adev->late_init_work,
- msecs_to_jiffies(AMDGPU_RESUME_MS));
-
amdgpu_device_fill_reset_magic(adev);
+ r = amdgpu_device_enable_mgpu_fan_boost();
+ if (r)
+ DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+
+ /* set to low pstate by default */
+ amdgpu_xgmi_set_pstate(adev, 0);
+
return 0;
}
@@ -1957,65 +2041,20 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_device_enable_mgpu_fan_boost(void)
-{
- struct amdgpu_gpu_instance *gpu_ins;
- struct amdgpu_device *adev;
- int i, ret = 0;
-
- mutex_lock(&mgpu_info.mutex);
-
- /*
- * MGPU fan boost feature should be enabled
- * only when there are two or more dGPUs in
- * the system
- */
- if (mgpu_info.num_dgpu < 2)
- goto out;
-
- for (i = 0; i < mgpu_info.num_dgpu; i++) {
- gpu_ins = &(mgpu_info.gpu_ins[i]);
- adev = gpu_ins->adev;
- if (!(adev->flags & AMD_IS_APU) &&
- !gpu_ins->mgpu_fan_enabled &&
- adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
- ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
- if (ret)
- break;
-
- gpu_ins->mgpu_fan_enabled = 1;
- }
- }
-
-out:
- mutex_unlock(&mgpu_info.mutex);
-
- return ret;
-}
-
/**
- * amdgpu_device_ip_late_init_func_handler - work handler for ib test
+ * amdgpu_device_delayed_init_work_handler - work handler for IB tests
*
* @work: work_struct.
*/
-static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
+static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device, late_init_work.work);
+ container_of(work, struct amdgpu_device, delayed_init_work.work);
int r;
r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
-
- r = amdgpu_device_enable_mgpu_fan_boost();
- if (r)
- DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
-
- /*set to low pstate by default */
- amdgpu_xgmi_set_pstate(adev, 0);
-
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -2467,7 +2506,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->lock_reset);
mutex_init(&adev->virt.dpm_mutex);
- amdgpu_device_check_arguments(adev);
+ r = amdgpu_device_check_arguments(adev);
+ if (r)
+ return r;
spin_lock_init(&adev->mmio_idx_lock);
spin_lock_init(&adev->smc_idx_lock);
@@ -2485,8 +2526,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->ring_lru_list);
spin_lock_init(&adev->ring_lru_list_lock);
- INIT_DELAYED_WORK(&adev->late_init_work,
- amdgpu_device_ip_late_init_func_handler);
+ INIT_DELAYED_WORK(&adev->delayed_init_work,
+ amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
@@ -2552,19 +2593,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto fence_driver_init;
}
- /* Read BIOS */
- if (!amdgpu_get_bios(adev)) {
- r = -EINVAL;
- goto failed;
- }
-
- r = amdgpu_atombios_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
- goto failed;
- }
-
/* detect if we are with an SRIOV vbios */
amdgpu_device_detect_sriov_bios(adev);
@@ -2666,6 +2694,10 @@ fence_driver_init:
if (r)
DRM_ERROR("registering pm debugfs failed (%d).\n", r);
+ r = amdgpu_ucode_sysfs_init(adev);
+ if (r)
+ DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+
r = amdgpu_debugfs_gem_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -2706,7 +2738,16 @@ fence_driver_init:
}
/* must succeed. */
- amdgpu_ras_post_init(adev);
+ amdgpu_ras_resume(adev);
+
+ queue_delayed_work(system_wq, &adev->delayed_init_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
+
+ r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
+ if (r) {
+ dev_err(adev->dev, "Could not create pcie_replay_count");
+ return r;
+ }
return 0;
@@ -2749,7 +2790,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
}
adev->accel_working = false;
- cancel_delayed_work_sync(&adev->late_init_work);
+ cancel_delayed_work_sync(&adev->delayed_init_work);
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
@@ -2771,6 +2812,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
+ device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
+ amdgpu_ucode_sysfs_fini(adev);
}
@@ -2810,7 +2853,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (fbcon)
amdgpu_fbdev_set_suspend(adev, 1);
- cancel_delayed_work_sync(&adev->late_init_work);
+ cancel_delayed_work_sync(&adev->delayed_init_work);
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
@@ -2851,6 +2894,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
amdgpu_amdkfd_suspend(adev);
+ amdgpu_ras_suspend(adev);
+
r = amdgpu_device_ip_suspend_phase1(adev);
/* evict vram memory */
@@ -2928,6 +2973,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
return r;
+ queue_delayed_work(system_wq, &adev->delayed_init_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
+
if (!amdgpu_device_has_dc_support(adev)) {
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2951,7 +2999,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
return r;
/* Make sure IB tests flushed */
- flush_delayed_work(&adev->late_init_work);
+ flush_delayed_work(&adev->delayed_init_work);
/* blat the mode back in */
if (fbcon) {
@@ -2971,6 +3019,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
drm_kms_helper_poll_enable(dev);
+ amdgpu_ras_resume(adev);
+
/*
* Most of the connector probing functions try to acquire runtime pm
* refs to ensure that the GPU is powered on when connector polling is
@@ -3335,8 +3385,6 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!ring || !ring->sched.thread)
continue;
- drm_sched_stop(&ring->sched);
-
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
@@ -3344,8 +3392,7 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if(job)
drm_sched_increase_karma(&job->base);
-
-
+ /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
if (!amdgpu_sriov_vf(adev)) {
if (!need_full_reset)
@@ -3452,6 +3499,13 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
if (vram_lost)
amdgpu_device_fill_reset_magic(tmp_adev);
+ r = amdgpu_device_ip_late_init(tmp_adev);
+ if (r)
+ goto out;
+
+ /* must succeed. */
+ amdgpu_ras_resume(tmp_adev);
+
/* Update PSP FW topology after reset */
if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
r = amdgpu_xgmi_update_topology(hive, tmp_adev);
@@ -3483,38 +3537,21 @@ end:
return r;
}
-static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
- struct amdgpu_job *job)
+static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
{
- int i;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
-
- if (!adev->asic_reset_res)
- drm_sched_resubmit_jobs(&ring->sched);
-
- drm_sched_start(&ring->sched, !adev->asic_reset_res);
- }
-
- if (!amdgpu_device_has_dc_support(adev)) {
- drm_helper_resume_force_mode(adev->ddev);
- }
-
- adev->asic_reset_res = 0;
-}
+ if (trylock) {
+ if (!mutex_trylock(&adev->lock_reset))
+ return false;
+ } else
+ mutex_lock(&adev->lock_reset);
-static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
-{
- mutex_lock(&adev->lock_reset);
atomic_inc(&adev->gpu_reset_counter);
adev->in_gpu_reset = 1;
/* Block kfd: SRIOV would do it separately */
if (!amdgpu_sriov_vf(adev))
amdgpu_amdkfd_pre_reset(adev);
+
+ return true;
}
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
@@ -3542,40 +3579,44 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job *job)
{
- int r;
+ struct list_head device_list, *device_list_handle = NULL;
+ bool need_full_reset, job_signaled;
struct amdgpu_hive_info *hive = NULL;
- bool need_full_reset = false;
struct amdgpu_device *tmp_adev = NULL;
- struct list_head device_list, *device_list_handle = NULL;
+ int i, r = 0;
+ need_full_reset = job_signaled = false;
INIT_LIST_HEAD(&device_list);
dev_info(adev->dev, "GPU reset begin!\n");
+ cancel_delayed_work_sync(&adev->delayed_init_work);
+
+ hive = amdgpu_get_xgmi_hive(adev, false);
+
/*
- * In case of XGMI hive disallow concurrent resets to be triggered
- * by different nodes. No point also since the one node already executing
- * reset will also reset all the other nodes in the hive.
+ * Here we trylock to avoid a chain of resets executing from either a
+ * trigger by jobs on different adevs in an XGMI hive, or by jobs on
+ * different schedulers for the same device, while this timeout handler
+ * is running. We always reset all schedulers for a device and all
+ * devices in an XGMI hive, so that should take care of them too.
*/
- hive = amdgpu_get_xgmi_hive(adev, 0);
- if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
- !mutex_trylock(&hive->reset_lock))
+
+ if (hive && !mutex_trylock(&hive->reset_lock)) {
+ DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
+ job->base.id, hive->hive_id);
return 0;
+ }
/* Start with adev pre asic reset first for soft reset check.*/
- amdgpu_device_lock_adev(adev);
- r = amdgpu_device_pre_asic_reset(adev,
- job,
- &need_full_reset);
- if (r) {
- /*TODO Should we stop ?*/
- DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
- r, adev->ddev->unique);
- adev->asic_reset_res = r;
+ if (!amdgpu_device_lock_adev(adev, !hive)) {
+ DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
+ job->base.id);
+ return 0;
}
/* Build list of devices to reset */
- if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
if (!hive) {
amdgpu_device_unlock_adev(adev);
return -ENODEV;
@@ -3592,13 +3633,56 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
device_list_handle = &device_list;
}
+ /* block all schedulers and reset given job's ring */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = tmp_adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ drm_sched_stop(&ring->sched, &job->base);
+ }
+ }
+
+
+ /*
+ * Must check guilty signal here since after this point all old
+ * HW fences are force signaled.
+ *
+ * job->base holds a reference to parent fence
+ */
+ if (job && job->base.s_fence->parent &&
+ dma_fence_is_signaled(job->base.s_fence->parent))
+ job_signaled = true;
+
+ if (!amdgpu_device_ip_need_full_reset(adev))
+ device_list_handle = &device_list;
+
+ if (job_signaled) {
+ dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+ goto skip_hw_reset;
+ }
+
+
+ /* Guilty job will be freed after this */
+ r = amdgpu_device_pre_asic_reset(adev,
+ job,
+ &need_full_reset);
+ if (r) {
+ /* TODO: should we stop? */
+ DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+ r, adev->ddev->unique);
+ adev->asic_reset_res = r;
+ }
+
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
if (tmp_adev == adev)
continue;
- amdgpu_device_lock_adev(tmp_adev);
+ amdgpu_device_lock_adev(tmp_adev, false);
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
&need_full_reset);
@@ -3622,9 +3706,28 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
goto retry;
}
+skip_hw_reset:
+
/* Post ASIC reset for all devs .*/
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = tmp_adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ /* No point in resubmitting jobs if we didn't HW reset */
+ if (!tmp_adev->asic_reset_res && !job_signaled)
+ drm_sched_resubmit_jobs(&ring->sched);
+
+ drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
+ }
+
+ if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+ drm_helper_resume_force_mode(tmp_adev->ddev);
+ }
+
+ tmp_adev->asic_reset_res = 0;
if (r) {
/* bad news, how to tell it to userspace ? */
@@ -3637,7 +3740,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_unlock_adev(tmp_adev);
}
- if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ if (hive)
mutex_unlock(&hive->reset_lock);
if (r)
@@ -3645,43 +3748,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
return r;
}
-static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
- enum pci_bus_speed *speed,
- enum pcie_link_width *width)
-{
- struct pci_dev *pdev = adev->pdev;
- enum pci_bus_speed cur_speed;
- enum pcie_link_width cur_width;
- u32 ret = 1;
-
- *speed = PCI_SPEED_UNKNOWN;
- *width = PCIE_LNK_WIDTH_UNKNOWN;
-
- while (pdev) {
- cur_speed = pcie_get_speed_cap(pdev);
- cur_width = pcie_get_width_cap(pdev);
- ret = pcie_bandwidth_available(adev->pdev, NULL,
- NULL, &cur_width);
- if (!ret)
- cur_width = PCIE_LNK_WIDTH_RESRV;
-
- if (cur_speed != PCI_SPEED_UNKNOWN) {
- if (*speed == PCI_SPEED_UNKNOWN)
- *speed = cur_speed;
- else if (cur_speed < *speed)
- *speed = cur_speed;
- }
-
- if (cur_width != PCIE_LNK_WIDTH_UNKNOWN) {
- if (*width == PCIE_LNK_WIDTH_UNKNOWN)
- *width = cur_width;
- else if (cur_width < *width)
- *width = cur_width;
- }
- pdev = pci_upstream_bridge(pdev);
- }
-}
-
/**
* amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
*
@@ -3715,8 +3781,8 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
return;
- amdgpu_device_get_min_pci_speed_width(adev, &platform_speed_cap,
- &platform_link_width);
+ pcie_bandwidth_available(adev->pdev, NULL,
+ &platform_speed_cap, &platform_link_width);
if (adev->pm.pcie_gen_mask == 0) {
/* asic caps */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b083b219b1a9..535650967b1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -23,7 +23,7 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
@@ -32,11 +32,13 @@
#include "amdgpu_display.h"
#include <asm/div64.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_vblank.h>
static void amdgpu_display_flip_callback(struct dma_fence *f,
struct dma_fence_cb *cb)
@@ -631,10 +633,6 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
amdgpu_dither_enum_list, sz);
if (amdgpu_device_has_dc_support(adev)) {
- adev->mode_info.max_bpc_property =
- drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
- if (!adev->mode_info.max_bpc_property)
- return -ENOMEM;
adev->mode_info.abm_level_property =
drm_property_create_range(adev->ddev, 0,
"abm level", 0, 4);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index a38e0fb4a6fe..489041df1f45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012 Advanced Micro Devices, Inc.
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -31,8 +31,6 @@
* objects between different devices via PRIME <prime_buffer_sharing>`.
*/
-#include <drm/drmP.h>
-
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
@@ -103,7 +101,8 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
* Returns:
* 0 on success or a negative error code on failure.
*/
-int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -137,57 +136,6 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma
return ret;
}
-/**
- * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
- * implementation
- * @dev: DRM device
- * @attach: DMA-buf attachment
- * @sg: Scatter/gather table
- *
- * Imports shared DMA buffer memory exported by another device.
- *
- * Returns:
- * A new GEM BO of the given DRM device, representing the memory
- * described by the given DMA-buf attachment and scatter/gather table.
- */
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
-{
- struct reservation_object *resv = attach->dmabuf->resv;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_bo *bo;
- struct amdgpu_bo_param bp;
- int ret;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = attach->dmabuf->size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_CPU;
- bp.flags = 0;
- bp.type = ttm_bo_type_sg;
- bp.resv = resv;
- ww_mutex_lock(&resv->lock, NULL);
- ret = amdgpu_bo_create(adev, &bp, &bo);
- if (ret)
- goto error;
-
- bo->tbo.sg = sg;
- bo->tbo.ttm->sg = sg;
- bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
- bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
- if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
- bo->prime_shared_count = 1;
-
- ww_mutex_unlock(&resv->lock);
- return &bo->gem_base;
-
-error:
- ww_mutex_unlock(&resv->lock);
- return ERR_PTR(ret);
-}
-
static int
__reservation_object_make_exclusive(struct reservation_object *obj)
{
@@ -231,7 +179,7 @@ err_fences_put:
}
/**
- * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
+ * amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation
* @dma_buf: Shared DMA buffer
* @attach: DMA-buf attachment
*
@@ -242,8 +190,8 @@ err_fences_put:
* Returns:
* 0 on success or a negative error code on failure.
*/
-static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -291,15 +239,15 @@ error_detach:
}
/**
- * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
+ * amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation
* @dma_buf: Shared DMA buffer
* @attach: DMA-buf attachment
*
* This is called when a shared DMA buffer no longer needs to be accessible by
* another device. For now, simply unpins the buffer from GTT.
*/
-static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -334,7 +282,7 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
}
/**
- * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
+ * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
* @dma_buf: Shared DMA buffer
* @direction: Direction of DMA transfer
*
@@ -345,8 +293,8 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
* Returns:
* 0 on success or a negative error code on failure.
*/
-static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
- enum dma_data_direction direction)
+static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction direction)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -374,12 +322,12 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .attach = amdgpu_gem_map_attach,
- .detach = amdgpu_gem_map_detach,
+ .attach = amdgpu_dma_buf_map_attach,
+ .detach = amdgpu_dma_buf_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .begin_cpu_access = amdgpu_gem_begin_cpu_access,
+ .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
@@ -418,6 +366,57 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
}
/**
+ * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
+ * implementation
+ * @dev: DRM device
+ * @attach: DMA-buf attachment
+ * @sg: Scatter/gather table
+ *
+ * Imports shared DMA buffer memory exported by another device.
+ *
+ * Returns:
+ * A new GEM BO of the given DRM device, representing the memory
+ * described by the given DMA-buf attachment and scatter/gather table.
+ */
+struct drm_gem_object *
+amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg)
+{
+ struct reservation_object *resv = attach->dmabuf->resv;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_bo *bo;
+ struct amdgpu_bo_param bp;
+ int ret;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.size = attach->dmabuf->size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_CPU;
+ bp.flags = 0;
+ bp.type = ttm_bo_type_sg;
+ bp.resv = resv;
+ ww_mutex_lock(&resv->lock, NULL);
+ ret = amdgpu_bo_create(adev, &bp, &bo);
+ if (ret)
+ goto error;
+
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+ if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
+ bo->prime_shared_count = 1;
+
+ ww_mutex_unlock(&resv->lock);
+ return &bo->gem_base;
+
+error:
+ ww_mutex_unlock(&resv->lock);
+ return ERR_PTR(ret);
+}
+
+/**
* amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
* @dev: DRM device
* @dma_buf: Shared DMA buffer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
new file mode 100644
index 000000000000..c7056cbe8685
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DMA_BUF_H__
+#define __AMDGPU_DMA_BUF_H__
+
+#include <drm/drm_gem.h>
+
+struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg);
+struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gobj,
+ int flags);
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
+void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
+void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
+extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 523b8ab6b04e..eedecaf4c804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index dca35407879d..521dbd0d9af8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -75,6 +75,20 @@ struct amdgpu_dpm_thermal {
int min_temp;
/* high temperature threshold */
int max_temp;
+ /* edge max emergency (shutdown) temp */
+ int max_edge_emergency_temp;
+ /* hotspot low temperature threshold */
+ int min_hotspot_temp;
+ /* hotspot high temperature critical threshold */
+ int max_hotspot_crit_temp;
+ /* hotspot max emergency (shutdown) temp */
+ int max_hotspot_emergency_temp;
+ /* memory low temperature threshold */
+ int min_mem_temp;
+ /* memory high temperature critical threshold */
+ int max_mem_crit_temp;
+ /* memory max emergency (shutdown) temp */
+ int max_mem_emergency_temp;
/* was last interrupt low to high or high to low */
bool high_to_low;
/* interrupt source */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1e2cc9d68a05..0a577a389024 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -22,21 +22,23 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
+#include <drm/drm_vblank.h>
#include "amdgpu_drv.h"
#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_probe_helper.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
-#include "amdgpu_gem.h"
+#include "amdgpu_dma_buf.h"
#include "amdgpu_amdkfd.h"
@@ -81,6 +83,8 @@
#define KMS_DRIVER_MINOR 32
#define KMS_DRIVER_PATCHLEVEL 0
+#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
+
int amdgpu_vram_limit = 0;
int amdgpu_vis_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
@@ -93,7 +97,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 10000;
+char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
@@ -227,12 +231,21 @@ MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
/**
- * DOC: lockup_timeout (int)
- * Set GPU scheduler timeout value in ms. Value 0 is invalidated, will be adjusted to 10000.
- * Negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET). The default is 10000.
- */
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)");
-module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
+ * DOC: lockup_timeout (string)
+ * Set the GPU scheduler timeout value in ms.
+ *
+ * The format is either [Non-Compute] or [GFX,Compute,SDMA,Video]; that is, one or
+ * multiple values may be specified. Zero and negative values are invalid and are
+ * replaced by the default timeout.
+ * - With one value specified, the setting applies to all non-compute jobs.
+ * - With multiple values specified, the first is for GFX, the second for Compute,
+ * and the third and fourth for SDMA and Video respectively.
+ * By default (with no lockup_timeout setting), the timeout for all non-compute
+ * (GFX, SDMA and Video) jobs is 10000 ms, and no timeout is enforced on compute jobs.
+ */
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and no timeout for compute jobs), "
+ "format is [Non-Compute] or [GFX,Compute,SDMA,Video]");
+module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
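As an illustration of the format documented above (the values are arbitrary, not
recommendations): a zero falls back to the per-ring default, so the following keeps
10 s for GFX/SDMA/Video while leaving compute unbounded:

    modprobe amdgpu lockup_timeout=10000,0,10000,10000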
/**
* DOC: dpm (int)
@@ -655,6 +668,16 @@ MODULE_PARM_DESC(noretry,
int halt_if_hws_hang;
module_param(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+
+/**
+ * DOC: hws_gws_support(bool)
+ * Whether HWS support gws barriers. Default value: false (not supported)
+ * This will be replaced with a MEC firmware version check once firmware
+ * is ready
+ */
+bool hws_gws_support;
+module_param(hws_gws_support, bool, 0444);
+MODULE_PARM_DESC(hws_gws_support, "MEC FW supports GWS barriers (false = not supported (default), true = supported)");
#endif
/**
@@ -1216,6 +1239,62 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
+int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+{
+ char *input = amdgpu_lockup_timeout;
+ char *timeout_setting = NULL;
+ int index = 0;
+ long timeout;
+ int ret = 0;
+
+ /*
+ * By default the timeout for non-compute jobs is 10000 ms,
+ * and no timeout is enforced on compute jobs.
+ */
+ adev->gfx_timeout = adev->sdma_timeout = adev->video_timeout = 10000;
+ adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
+
+ /* Invalidate 0 and negative values */
+ if (timeout <= 0) {
+ index++;
+ continue;
+ }
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * Only one value was specified, so apply it
+ * to all non-compute jobs.
+ */
+ if (index == 1)
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ }
+
+ return ret;
+}
+
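To make the parsing above concrete (values chosen for illustration only): with
lockup_timeout=5000,60000 the loop assigns gfx_timeout = 5000 and
compute_timeout = 60000; since two values were given, the index == 1 fallback does
not fire, and sdma_timeout and video_timeout keep the 10000 ms default.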
static bool
amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index ec78e2b2015c..571a6dfb473e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -23,7 +23,7 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index e47609218839..eb3569b46c1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -23,22 +23,22 @@
* Authors:
* David Airlie
*/
+
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/amdgpu_drm.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+
#include "amdgpu.h"
#include "cikd.h"
#include "amdgpu_gem.h"
-#include <drm/drm_fb_helper.h>
-
-#include <linux/vga_switcheroo.h>
-
#include "amdgpu_display.h"
/* object hierarchy -
@@ -121,6 +121,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
+ const struct drm_format_info *info;
struct amdgpu_device *adev = rfbdev->adev;
struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *abo = NULL;
@@ -131,7 +132,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
int height = mode_cmd->height;
u32 cpp;
- cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
+ info = drm_get_format_info(adev->ddev, mode_cmd);
+ cpp = info->cpp[0];
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 4dee2326b29c..df49fa4bbf61 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -34,7 +34,9 @@
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_debugfs.h>
+
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -427,9 +429,13 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission)
{
+ struct amdgpu_device *adev = ring->adev;
long timeout;
int r;
+ if (!adev)
+ return -EINVAL;
+
/* Check that num_hw_submission is a power of two */
if ((num_hw_submission & (num_hw_submission - 1)) != 0)
return -EINVAL;
@@ -451,12 +457,31 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
- /* for non-sriov case, no timeout enforce on compute ring */
- if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- && !amdgpu_sriov_vf(ring->adev))
- timeout = MAX_SCHEDULE_TIMEOUT;
- else
- timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ timeout = adev->gfx_timeout;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ /*
+ * For the non-SR-IOV case, no timeout is
+ * enforced on the compute ring by default,
+ * unless the user specifies one.
+ *
+ * For the SR-IOV case, always use the same
+ * timeout as the GFX ring.
+ */
+ if (!amdgpu_sriov_vf(ring->adev))
+ timeout = adev->compute_timeout;
+ else
+ timeout = adev->gfx_timeout;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ timeout = adev->sdma_timeout;
+ break;
+ default:
+ timeout = adev->video_timeout;
+ break;
+ }
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 6d11e1721147..d79ab1da9e07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -25,7 +25,10 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index f89f5734d985..dad2186f4ed5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -27,26 +27,11 @@
struct amdgpu_ring;
struct amdgpu_bo;
-struct amdgpu_gds_asic_info {
- uint32_t total_size;
- uint32_t gfx_partition_size;
- uint32_t cs_partition_size;
-};
-
struct amdgpu_gds {
- struct amdgpu_gds_asic_info mem;
- struct amdgpu_gds_asic_info gws;
- struct amdgpu_gds_asic_info oa;
+ uint32_t gds_size;
+ uint32_t gws_size;
+ uint32_t oa_size;
uint32_t gds_compute_max_wave_id;
-
- /* At present, GDS, GWS and OA resources for gfx (graphics)
- * is always pre-allocated and available for graphics operation.
- * Such resource is shared between all gfx clients.
- * TODO: move this operation to user space
- * */
- struct amdgpu_bo* gds_gfx_bo;
- struct amdgpu_bo* gws_gfx_bo;
- struct amdgpu_bo* oa_gfx_bo;
};
struct amdgpu_gds_reg_offset {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d4fcf5475464..37b526c6f494 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -26,9 +26,13 @@
* Jerome Glisse
*/
#include <linux/ktime.h>
+#include <linux/module.h>
#include <linux/pagemap.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include <drm/amdgpu_drm.h>
+#include <drm/drm_debugfs.h>
+
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"
@@ -171,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, true);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -330,26 +334,24 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = amdgpu_bo_reserve(bo, true);
if (r)
- goto free_pages;
+ goto user_pages_done;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (r)
- goto free_pages;
+ goto user_pages_done;
}
r = drm_gem_handle_create(filp, gobj, &handle);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(gobj);
if (r)
- return r;
+ goto user_pages_done;
args->handle = handle;
- return 0;
-free_pages:
- release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
+user_pages_done:
+ if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
release_object:
drm_gem_object_put_unlocked(gobj);
@@ -610,7 +612,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, true);
if (r)
goto error_unref;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index f1ddfc50bcc7..b8ba6e27c61f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -39,22 +39,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg);
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
- int flags);
-struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
-struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
-void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
-void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-
-extern const struct dma_buf_ops amdgpu_dmabuf_ops;
/*
* GEM objects.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 997932ebbb83..f198185c1fb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -22,7 +22,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 250d9212cc38..924d83e711ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -24,6 +24,8 @@
*
*/
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#include "amdgpu.h"
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 62591d081856..627104401e84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -22,7 +22,6 @@
* Authors: Christian König
*/
-#include <drm/drmP.h>
#include "amdgpu.h"
struct amdgpu_gtt_mgr {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index f2739995c335..70dbe343f51d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -23,9 +23,10 @@
* Authors: Dave Airlie
* Alex Deucher
*/
+
#include <linux/export.h>
+#include <linux/pci.h>
-#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index fe393a46f881..c124e583bb91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -28,8 +28,10 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
+#include <drm/drm_debugfs.h>
+
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index df9b173c3d0b..df6d33381f8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -24,7 +24,7 @@
#include <linux/idr.h>
#include <linux/dma-fence-array.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_trace.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 934dfdcb4e73..6d8f05511aba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -21,7 +21,8 @@
*
*/
-#include <drm/drmP.h>
+#include <linux/dma-mapping.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
index 26482914dc4b..5cf142e849bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
@@ -29,8 +29,9 @@
*/
#include <linux/compat.h>
-#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_ioctl.h>
+
#include "amdgpu_drv.h"
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index af4c3b1af322..2a3f5ec298db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -43,8 +43,11 @@
*/
#include <linux/irq.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0a17fb1af204..9d76e0923a5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -24,7 +24,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -51,6 +51,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
if (amdgpu_device_should_recover_gpu(ring->adev))
amdgpu_device_gpu_recover(ring->adev, job);
+ else
+ drm_sched_suspend_timeout(&ring->sched);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b17d0545728e..a70e5a32749a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -25,8 +25,9 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include "amdgpu.h"
+#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
@@ -35,6 +36,8 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
@@ -590,13 +593,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_info_gds gds_info;
memset(&gds_info, 0, sizeof(gds_info));
- gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size;
- gds_info.compute_partition_size = adev->gds.mem.cs_partition_size;
- gds_info.gds_total_size = adev->gds.mem.total_size;
- gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size;
- gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size;
- gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size;
- gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size;
+ gds_info.compute_partition_size = adev->gds.gds_size;
+ gds_info.gds_total_size = adev->gds.gds_size;
+ gds_info.gws_per_compute_partition = adev->gds.gws_size;
+ gds_info.oa_per_compute_partition = adev->gds.oa_size;
return copy_to_user(out, &gds_info,
min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
}
@@ -977,7 +977,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
int r, pasid;
/* Ensure IB tests are run on ring */
- flush_delayed_work(&adev->late_init_work);
+ flush_delayed_work(&adev->delayed_init_work);
file_priv->driver_priv = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 58ed401c5996..4ff4cf5988ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -45,9 +45,9 @@
#include <linux/firmware.h>
#include <linux/module.h>
-#include <linux/mmu_notifier.h>
+#include <linux/hmm.h>
#include <linux/interval_tree.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
#include "amdgpu.h"
@@ -58,14 +58,12 @@
*
* @adev: amdgpu device pointer
* @mm: process address space
- * @mn: MMU notifier structure
* @type: type of MMU notifier
* @work: destruction work item
* @node: hash table node to find structure by adev and mn
* @lock: rw semaphore protecting the notifier nodes
* @objects: interval tree containing amdgpu_mn_nodes
- * @read_lock: mutex for recursive locking of @lock
- * @recursion: depth of recursion
+ * @mirror: HMM mirror for this address space
*
* Data for each amdgpu device and process address space.
*/
@@ -73,7 +71,6 @@ struct amdgpu_mn {
/* constant after initialisation */
struct amdgpu_device *adev;
struct mm_struct *mm;
- struct mmu_notifier mn;
enum amdgpu_mn_type type;
/* only used on destruction */
@@ -85,8 +82,9 @@ struct amdgpu_mn {
/* objects protected by lock */
struct rw_semaphore lock;
struct rb_root_cached objects;
- struct mutex read_lock;
- atomic_t recursion;
+
+ /* HMM mirror */
+ struct hmm_mirror mirror;
};
/**
@@ -103,7 +101,7 @@ struct amdgpu_mn_node {
};
/**
- * amdgpu_mn_destroy - destroy the MMU notifier
+ * amdgpu_mn_destroy - destroy the HMM mirror
*
* @work: previously scheduled work item
*
@@ -129,28 +127,26 @@ static void amdgpu_mn_destroy(struct work_struct *work)
}
up_write(&amn->lock);
mutex_unlock(&adev->mn_lock);
- mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
+
+ hmm_mirror_unregister(&amn->mirror);
kfree(amn);
}
/**
- * amdgpu_mn_release - callback to notify about mm destruction
+ * amdgpu_hmm_mirror_release - callback to notify about mm destruction
*
- * @mn: our notifier
- * @mm: the mm this callback is about
+ * @mirror: the HMM mirror (mm) this callback is about
*
- * Shedule a work item to lazy destroy our notifier.
+ * Schedule a work item to lazily destroy the HMM mirror.
*/
-static void amdgpu_mn_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
+static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
{
- struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
INIT_WORK(&amn->work, amdgpu_mn_destroy);
schedule_work(&amn->work);
}
-
/**
* amdgpu_mn_lock - take the write side lock for this notifier
*
@@ -181,14 +177,10 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
{
if (blockable)
- mutex_lock(&amn->read_lock);
- else if (!mutex_trylock(&amn->read_lock))
+ down_read(&amn->lock);
+ else if (!down_read_trylock(&amn->lock))
return -EAGAIN;
- if (atomic_inc_return(&amn->recursion) == 1)
- down_read_non_owner(&amn->lock);
- mutex_unlock(&amn->read_lock);
-
return 0;
}
@@ -199,8 +191,7 @@ static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
*/
static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
{
- if (atomic_dec_return(&amn->recursion) == 0)
- up_read_non_owner(&amn->lock);
+ up_read(&amn->lock);
}
/**
@@ -229,149 +220,132 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
- amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
}
}
/**
- * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
+ * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
*
- * @mn: our notifier
- * @range: mmu notifier context
+ * @mirror: the hmm_mirror (mm) that is about to be updated
+ * @update: the update's start and end addresses
*
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
-static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
+static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
+ const struct hmm_update *update)
{
- struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
+ unsigned long start = update->start;
+ unsigned long end = update->end;
+ bool blockable = update->blockable;
struct interval_tree_node *it;
- unsigned long end;
/* notification is exclusive, but interval is inclusive */
- end = range->end - 1;
+ end -= 1;
/* TODO we should be able to split locking for interval tree and
* amdgpu_mn_invalidate_node
*/
- if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
+ if (amdgpu_mn_read_lock(amn, blockable))
return -EAGAIN;
- it = interval_tree_iter_first(&amn->objects, range->start, end);
+ it = interval_tree_iter_first(&amn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
- if (!mmu_notifier_range_blockable(range)) {
+ if (!blockable) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, range->start, end);
+ it = interval_tree_iter_next(it, start, end);
- amdgpu_mn_invalidate_node(node, range->start, end);
+ amdgpu_mn_invalidate_node(node, start, end);
}
+ amdgpu_mn_read_unlock(amn);
+
return 0;
}
/**
- * amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
+ * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
*
- * @mn: our notifier
- * @mm: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @mirror: the hmm_mirror (mm) that is about to be updated
+ * @update: the update's start and end addresses
*
* We temporarily evict all BOs between start and end. This
* necessitates evicting all user-mode queues of the process. The BOs
* are restored in amdgpu_mn_invalidate_range_end_hsa.
*/
-static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
+static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
+ const struct hmm_update *update)
{
- struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+ struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
+ unsigned long start = update->start;
+ unsigned long end = update->end;
+ bool blockable = update->blockable;
struct interval_tree_node *it;
- unsigned long end;
/* notification is exclusive, but interval is inclusive */
- end = range->end - 1;
+ end -= 1;
- if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
+ if (amdgpu_mn_read_lock(amn, blockable))
return -EAGAIN;
- it = interval_tree_iter_first(&amn->objects, range->start, end);
+ it = interval_tree_iter_first(&amn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
- if (!mmu_notifier_range_blockable(range)) {
+ if (!blockable) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, range->start, end);
+ it = interval_tree_iter_next(it, start, end);
list_for_each_entry(bo, &node->bos, mn_list) {
struct kgd_mem *mem = bo->kfd_bo;
if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
- range->start,
- end))
- amdgpu_amdkfd_evict_userptr(mem, range->mm);
+ start, end))
+ amdgpu_amdkfd_evict_userptr(mem, amn->mm);
}
}
+ amdgpu_mn_read_unlock(amn);
+
return 0;
}
-/**
- * amdgpu_mn_invalidate_range_end - callback to notify about mm change
- *
- * @mn: our notifier
- * @mm: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
- *
- * Release the lock again to allow new command submissions.
+/* Low bits of any reasonable mm pointer will be unused due to struct
+ * alignment. Use these bits to make a unique key from the mm pointer
+ * and notifier type.
*/
-static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
-
- amdgpu_mn_read_unlock(amn);
-}
+#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
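A rough sketch of how such a key can drive a lookup follows; the hashtable name
adev->mn_hash is outside this hunk and is assumed here, while the node member of
struct amdgpu_mn is documented above:

    /* illustrative only: find the amdgpu_mn for (mm, type), if any */
    struct amdgpu_mn *amn;

    hash_for_each_possible(adev->mn_hash, amn, node, AMDGPU_MN_KEY(mm, type))
            if (AMDGPU_MN_KEY(amn->mm, amn->type) == AMDGPU_MN_KEY(mm, type))
                    return amn;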
-static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
+static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
[AMDGPU_MN_TYPE_GFX] = {
- .release = amdgpu_mn_release,
- .invalidate_range_start = amdgpu_mn_invalidate_range_start_gfx,
- .invalidate_range_end = amdgpu_mn_invalidate_range_end,
+ .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
+ .release = amdgpu_hmm_mirror_release
},
[AMDGPU_MN_TYPE_HSA] = {
- .release = amdgpu_mn_release,
- .invalidate_range_start = amdgpu_mn_invalidate_range_start_hsa,
- .invalidate_range_end = amdgpu_mn_invalidate_range_end,
+ .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
+ .release = amdgpu_hmm_mirror_release
},
};
-/* Low bits of any reasonable mm pointer will be unused due to struct
- * alignment. Use these bits to make a unique key from the mm pointer
- * and notifier type.
- */
-#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
-
/**
- * amdgpu_mn_get - create notifier context
+ * amdgpu_mn_get - create HMM mirror context
*
* @adev: amdgpu device pointer
* @type: type of MMU notifier context
*
- * Creates a notifier context for current->mm.
+ * Creates a HMM mirror context for current->mm.
*/
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
enum amdgpu_mn_type type)
@@ -401,12 +375,10 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
amn->mm = mm;
init_rwsem(&amn->lock);
amn->type = type;
- amn->mn.ops = &amdgpu_mn_ops[type];
amn->objects = RB_ROOT_CACHED;
- mutex_init(&amn->read_lock);
- atomic_set(&amn->recursion, 0);
- r = __mmu_notifier_register(&amn->mn, mm);
+ amn->mirror.ops = &amdgpu_hmm_mirror_ops[type];
+ r = hmm_mirror_register(&amn->mirror, mm);
if (r)
goto free_amn;
@@ -432,7 +404,7 @@ free_amn:
* @bo: amdgpu buffer object
* @addr: userptr addr we should monitor
*
- * Registers an MMU notifier for the given BO at the specified address.
+ * Registers an HMM mirror for the given BO at the specified address.
* Returns 0 on success, -ERRNO if anything goes wrong.
*/
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
@@ -488,11 +460,11 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
}
/**
- * amdgpu_mn_unregister - unregister a BO for notifier updates
+ * amdgpu_mn_unregister - unregister a BO for HMM mirror updates
*
* @bo: amdgpu buffer object
*
- * Remove any registration of MMU notifier updates from the buffer object.
+ * Remove any registration of HMM mirror updates from the buffer object.
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
@@ -528,3 +500,26 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
mutex_unlock(&adev->mn_lock);
}
+/* flags used internally by HMM, not related to CPU/GPU PTE flags */
+static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
+ (1 << 0), /* HMM_PFN_VALID */
+ (1 << 1), /* HMM_PFN_WRITE */
+ 0 /* HMM_PFN_DEVICE_PRIVATE */
+};
+
+static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
+ 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
+ 0, /* HMM_PFN_NONE */
+ 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
+};
+
+void amdgpu_hmm_init_range(struct hmm_range *range)
+{
+ if (range) {
+ range->flags = hmm_range_flags;
+ range->values = hmm_range_values;
+ range->pfn_shift = PAGE_SHIFT;
+ range->pfns = NULL;
+ INIT_LIST_HEAD(&range->list);
+ }
+}
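A minimal, hypothetical caller could prepare a range on top of this helper; the
pfns sizing and GFP flags below are assumptions, not part of this patch:

    struct hmm_range range;
    unsigned long npages = (end - start) >> PAGE_SHIFT;

    amdgpu_hmm_init_range(&range);
    range.start = start;
    range.end = end;        /* exclusive */
    range.pfns = kvmalloc_array(npages, sizeof(*range.pfns), GFP_KERNEL);
    if (!range.pfns)
            return -ENOMEM;
    /* the range is now ready to hand to the HMM mirror for faulting */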
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
index eb0f432f78fe..f5b67c63ed6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -25,22 +25,24 @@
#define __AMDGPU_MN_H__
/*
- * MMU Notifier
+ * HMM mirror
*/
struct amdgpu_mn;
+struct hmm_range;
enum amdgpu_mn_type {
AMDGPU_MN_TYPE_GFX,
AMDGPU_MN_TYPE_HSA,
};
-#if defined(CONFIG_MMU_NOTIFIER)
+#if defined(CONFIG_HMM_MIRROR)
void amdgpu_mn_lock(struct amdgpu_mn *mn);
void amdgpu_mn_unlock(struct amdgpu_mn *mn);
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
enum amdgpu_mn_type type);
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
+void amdgpu_hmm_init_range(struct hmm_range *range);
#else
static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
@@ -51,6 +53,8 @@ static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
}
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
+ DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, "
+ "add CONFIG_ZONE_DEVICE=y to the kernel config to fix this\n");
return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 2e9e3db778c6..eb9975f4decb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -331,8 +331,6 @@ struct amdgpu_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
- /* maximum number of bits per channel for monitor color */
- struct drm_property *max_bpc_property;
/* Adaptive Backlight Modulation (power feature) */
struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 93b2c5a48a71..16f96f2e3671 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -31,7 +31,7 @@
*/
#include <linux/list.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index 8e67c1210d7c..1f2305b7bd13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index abeaab4bf1bc..194d0c75b072 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -22,7 +22,9 @@
* Authors: Rafał Miłecki <zajec5@gmail.com>
* Alex Deucher <alexdeucher@gmail.com>
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_debugfs.h>
+
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
@@ -31,6 +33,7 @@
#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/power_supply.h>
+#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
@@ -67,6 +70,15 @@ static const struct cg_flag_name clocks[] = {
{0, NULL},
};
+static const struct hwmon_temp_label {
+ enum PP_HWMON_TEMP channel;
+ const char *label;
+} temp_label[] = {
+ {PP_TEMP_EDGE, "edge"},
+ {PP_TEMP_JUNCTION, "junction"},
+ {PP_TEMP_MEM, "mem"},
+};
+
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
if (adev->pm.dpm_enabled) {
@@ -758,7 +770,11 @@ static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
pr_debug("featuremask = 0x%llx\n", featuremask);
- if (adev->powerplay.pp_funcs->set_ppfeature_status) {
+ if (is_support_sw_smu(adev)) {
+ ret = smu_set_ppfeature_status(&adev->smu, featuremask);
+ if (ret)
+ return -EINVAL;
+ } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
if (ret)
return -EINVAL;
@@ -774,7 +790,9 @@ static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->get_ppfeature_status)
+ if (is_support_sw_smu(adev)) {
+ return smu_get_ppfeature_status(&adev->smu, buf);
+ } else if (adev->powerplay.pp_funcs->get_ppfeature_status)
return amdgpu_dpm_get_ppfeature_status(adev, buf);
return snprintf(buf, PAGE_SIZE, "\n");
@@ -1303,6 +1321,32 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
}
/**
+ * DOC: mem_busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the VRAM
+ * is as a percentage. The file mem_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int r, value, size = sizeof(value);
+
+ /* read the IP busy sensor */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
+ (void *)&value, &size);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+/**
* DOC: pcie_bw
*
* The amdgpu driver provides a sysfs API for estimating how much data
@@ -1327,6 +1371,29 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
count0, count1, pcie_get_mps(adev->pdev));
}
+/**
+ * DOC: unique_id
+ *
+ * The amdgpu driver provides a sysfs API for reading a unique ID for the GPU.
+ * The file unique_id is used for this.
+ * The ID is tied to the GPU itself, so it persists even when the board is
+ * moved from machine to machine.
+ *
+ * NOTE: This only works on GFX9 and newer. The file is absent on
+ * unsupported ASICs (GFX8 and older).
+ */
+static ssize_t amdgpu_get_unique_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ if (adev->unique_id)
+ return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
+
+ return 0;
+}
+
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
amdgpu_get_dpm_forced_performance_level,
@@ -1371,10 +1438,13 @@ static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
amdgpu_set_pp_od_clk_voltage);
static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
amdgpu_get_busy_percent, NULL);
+static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
+ amdgpu_get_memory_busy_percent, NULL);
static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
amdgpu_get_ppfeature_status,
amdgpu_set_ppfeature_status);
+static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -1382,6 +1452,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
struct drm_device *ddev = adev->ddev;
+ int channel = to_sensor_dev_attr(attr)->index;
int r, temp, size = sizeof(temp);
/* Can't get temperature when the card is off */
@@ -1389,11 +1460,32 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* get the temperature */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
- (void *)&temp, &size);
- if (r)
- return r;
+ if (channel >= PP_TEMP_MAX)
+ return -EINVAL;
+
+ switch (channel) {
+ case PP_TEMP_JUNCTION:
+ /* get current junction temperature */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ (void *)&temp, &size);
+ if (r)
+ return r;
+ break;
+ case PP_TEMP_EDGE:
+ /* get current edge temperature */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
+ (void *)&temp, &size);
+ if (r)
+ return r;
+ break;
+ case PP_TEMP_MEM:
+ /* get current memory temperature */
+ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
+ (void *)&temp, &size);
+ if (r)
+ return r;
+ break;
+ }
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
@@ -1414,6 +1506,76 @@ static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
+static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int hyst = to_sensor_dev_attr(attr)->index;
+ int temp;
+
+ if (hyst)
+ temp = adev->pm.dpm.thermal.min_hotspot_temp;
+ else
+ temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int hyst = to_sensor_dev_attr(attr)->index;
+ int temp;
+
+ if (hyst)
+ temp = adev->pm.dpm.thermal.min_mem_temp;
+ else
+ temp = adev->pm.dpm.thermal.max_mem_crit_temp;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int channel = to_sensor_dev_attr(attr)->index;
+
+ if (channel >= PP_TEMP_MAX)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
+}
+
+static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int channel = to_sensor_dev_attr(attr)->index;
+ int temp = 0;
+
+ if (channel >= PP_TEMP_MAX)
+ return -EINVAL;
+
+ switch (channel) {
+ case PP_TEMP_JUNCTION:
+ temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
+ break;
+ case PP_TEMP_EDGE:
+ temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
+ break;
+ case PP_TEMP_MEM:
+ temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
+ break;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1983,11 +2145,20 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* hwmon interfaces for GPU temperature:
*
- * - temp1_input: the on die GPU temperature in millidegrees Celsius
+ * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
+ * - temp2_input and temp3_input are supported on SOC15 dGPUs only
+ *
+ * - temp[1-3]_label: temperature channel label
+ * - temp2_label and temp3_label are supported on SOC15 dGPUs only
+ *
+ * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
+ * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
*
- * - temp1_crit: temperature critical max value in millidegrees Celsius
+ * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
+ * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
*
- * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
+ * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
+ * - these are supported on SOC15 dGPUs only
*
* hwmon interfaces for GPU voltage:
*
@@ -2035,9 +2206,21 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
*/
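As a userspace sketch of reading the new channels (the hwmon instance name
"hwmon0" is machine-specific and assumed here; real code should scan
/sys/class/hwmon for the amdgpu device):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp2_input", "r");
            int mdeg;

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &mdeg) != 1) {
                    fclose(f);
                    return 1;
            }
            /* hwmon reports temperatures in millidegrees Celsius */
            printf("junction: %d.%03d C\n", mdeg / 1000, mdeg % 1000);
            fclose(f);
            return 0;
    }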
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
+static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
@@ -2064,6 +2247,18 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp3_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_min.dev_attr.attr,
@@ -2186,6 +2381,22 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
return 0;
+ /* only SOC15 dGPUs support hotspot and mem temperatures */
+ if (((adev->flags & AMD_IS_APU) ||
+ adev->asic_type < CHIP_VEGA10) &&
+ (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
+ return 0;
+
return effective_mode;
}
@@ -2627,6 +2838,16 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"gpu_busy_level\n");
return ret;
}
+ /* APU does not have its own dedicated memory */
+ if (!(adev->flags & AMD_IS_APU)) {
+ ret = device_create_file(adev->dev,
+ &dev_attr_mem_busy_percent);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "mem_busy_percent\n");
+ return ret;
+ }
+ }
/* PCIe Perf counters won't work on APU nodes */
if (!(adev->flags & AMD_IS_APU)) {
ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
@@ -2635,6 +2856,12 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
}
+	if (adev->unique_id) {
+		ret = device_create_file(adev->dev, &dev_attr_unique_id);
+		if (ret) {
+			DRM_ERROR("failed to create device file unique_id\n");
+			return ret;
+		}
+	}
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -2693,7 +2920,11 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
&dev_attr_pp_od_clk_voltage);
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
if (!(adev->flags & AMD_IS_APU))
+ device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
+ if (!(adev->flags & AMD_IS_APU))
device_remove_file(adev->dev, &dev_attr_pcie_bw);
+ if (adev->unique_id)
+ device_remove_file(adev->dev, &dev_attr_unique_id);
if ((adev->asic_type >= CHIP_VEGA10) &&
!(adev->flags & AMD_IS_APU))
device_remove_file(adev->dev, &dev_attr_ppfeatures);
@@ -2790,6 +3021,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* GPU Load */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
seq_printf(m, "GPU Load: %u %%\n", value);
+ /* MEM Load */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
+ seq_printf(m, "MEM Load: %u %%\n", value);
+
seq_printf(m, "\n");
/* SMC feature mask */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 86cc24b2e0aa..909be1bf2294 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -24,7 +24,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
@@ -289,6 +289,34 @@ static int psp_asd_load(struct psp_context *psp)
return ret;
}
+static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t id, uint32_t value)
+{
+ cmd->cmd_id = GFX_CMD_ID_PROG_REG;
+ cmd->cmd.cmd_setup_reg_prog.reg_value = value;
+ cmd->cmd.cmd_setup_reg_prog.reg_id = id;
+}
+
+int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
+ uint32_t value)
+{
+ struct psp_gfx_cmd_resp *cmd = NULL;
+ int ret = 0;
+
+ if (reg >= PSP_REG_LAST)
+ return -EINVAL;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_reg_prog_cmd_buf(cmd, reg, value);
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+ return ret;
+}
+
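
For context, a hypothetical caller of the new helper: an interrupt-handler init path could route its ring-control write through the PSP when direct MMIO access is blocked. This is a sketch only; the call site and error handling are assumptions, not part of this patch:

/* Sketch: program IH_RB_CNTL through the PSP instead of direct MMIO.
 * Assumes the psp_context embedded in amdgpu_device, as in this driver.
 */
static int example_program_ih_rb_cntl(struct amdgpu_device *adev,
				      uint32_t ih_rb_cntl)
{
	return psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl);
}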
static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
uint32_t xgmi_ta_size, uint32_t shared_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index cde113f07c96..cf49539b0b07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -62,6 +62,14 @@ struct psp_ring
uint32_t ring_size;
};
+/* More registers may be supported in the future */
+enum psp_reg_prog_id {
+ PSP_REG_IH_RB_CNTL = 0, /* register IH_RB_CNTL */
+ PSP_REG_IH_RB_CNTL_RING1 = 1, /* register IH_RB_CNTL_RING1 */
+ PSP_REG_IH_RB_CNTL_RING2 = 2, /* register IH_RB_CNTL_RING2 */
+ PSP_REG_LAST
+};
+
struct psp_funcs
{
int (*init_microcode)(struct psp_context *psp);
@@ -95,12 +103,26 @@ struct psp_funcs
int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
};
+#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
+struct psp_xgmi_node_info {
+ uint64_t node_id;
+ uint8_t num_hops;
+ uint8_t is_sharing_enabled;
+ enum ta_xgmi_assigned_sdma_engine sdma_engine;
+};
+
+struct psp_xgmi_topology_info {
+ uint32_t num_nodes;
+ struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
+};
+
struct psp_xgmi_context {
uint8_t initialized;
uint32_t session_id;
struct amdgpu_bo *xgmi_shared_bo;
uint64_t xgmi_shared_mc_addr;
void *xgmi_shared_buf;
+ struct psp_xgmi_topology_info top_info;
};
struct psp_ras_context {
@@ -181,18 +203,6 @@ struct amdgpu_psp_funcs {
enum AMDGPU_UCODE_ID);
};
-#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
-struct psp_xgmi_node_info {
- uint64_t node_id;
- uint8_t num_hops;
- uint8_t is_sharing_enabled;
- enum ta_xgmi_assigned_sdma_engine sdma_engine;
-};
-
-struct psp_xgmi_topology_info {
- uint32_t num_nodes;
- struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
-};
#define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
@@ -250,5 +260,6 @@ int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
-
+int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
+ uint32_t value);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 22bd21efe6b1..4d387557cc37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -24,6 +24,8 @@
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
+
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
@@ -90,6 +92,12 @@ struct ras_manager {
struct ras_err_data err_data;
};
+struct ras_badpage {
+ unsigned int bp;
+ unsigned int size;
+ unsigned int flags;
+};
+
const char *ras_error_string[] = {
"none",
"parity",
@@ -118,9 +126,16 @@ const char *ras_block_string[] = {
#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])
-#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
+#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
+#define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
+static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size,
+ struct amdgpu_bo **bo_ptr);
+static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
+ struct amdgpu_bo **bo_ptr);
+
static void amdgpu_ras_self_test(struct amdgpu_device *adev)
{
/* TODO */
@@ -237,8 +252,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return 0;
}
-/*
- * DOC: ras debugfs control interface
+/**
+ * DOC: AMDGPU RAS debugfs control interface
*
* It accepts a struct ras_debug_if, which has two members.
*
@@ -300,6 +315,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
struct ras_debug_if data;
+ struct amdgpu_bo *bo;
int ret = 0;
ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
@@ -317,7 +333,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
+ ret = amdgpu_ras_reserve_vram(adev,
+ data.inject.address, PAGE_SIZE, &bo);
+		/* If reservation fails, the address may already be in use;
+		 * injecting there would not be safe, so bail out.
+		 */
+ if (ret)
+ break;
+ data.inject.address = amdgpu_bo_gpu_offset(bo);
ret = amdgpu_ras_error_inject(adev, &data.inject);
+ amdgpu_ras_release_vram(adev, &bo);
break;
default:
ret = -EINVAL;
@@ -521,6 +546,8 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
enable ? "enable":"disable",
ras_block_str(head->block),
ret);
+ if (ret == TA_RAS_STATUS__RESET_NEEDED)
+ return -EAGAIN;
return -EINVAL;
}
@@ -541,16 +568,32 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
return -EINVAL;
if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
- /* If ras is enabled by vbios, we set up ras object first in
- * both case. For enable, that is all what we need do. For
- * disable, we need perform a ras TA disable cmd after that.
- */
- ret = __amdgpu_ras_feature_enable(adev, head, 1);
- if (ret)
- return ret;
+ if (enable) {
+			/* There is no harm in issuing a ras TA cmd regardless of
+			 * the current ras state.
+			 * If current state == target state, it will do nothing.
+			 * But sometimes it may request the driver to reset and
+			 * repost with error code -EAGAIN.
+			 */
+			ret = amdgpu_ras_feature_enable(adev, head, 1);
+			/* With an old ras TA, we might fail to enable ras.
+			 * Log it and just set up the object.
+			 * TODO: remove this workaround in the future.
+			 */
+ if (ret == -EINVAL) {
+ ret = __amdgpu_ras_feature_enable(adev, head, 1);
+ if (!ret)
+ DRM_INFO("RAS INFO: %s setup object\n",
+ ras_block_str(head->block));
+ }
+ } else {
+			/* Set up the object, then issue a ras TA disable cmd. */
+ ret = __amdgpu_ras_feature_enable(adev, head, 1);
+ if (ret)
+ return ret;
- if (!enable)
ret = amdgpu_ras_feature_enable(adev, head, 0);
+ }
} else
ret = amdgpu_ras_feature_enable(adev, head, enable);
@@ -691,6 +734,77 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* sysfs begin */
+static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage **bps, unsigned int *count);
+
+static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
+{
+ switch (flags) {
+ case 0:
+ return "R";
+ case 1:
+ return "P";
+ case 2:
+ default:
+ return "F";
+	}
+}
+
+/*
+ * DOC: ras sysfs gpu_vram_bad_pages interface
+ *
+ * It allows the user to read the bad VRAM pages of the GPU through
+ * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
+ *
+ * It outputs multiple lines; each line describes one GPU page.
+ *
+ * The format of each line is:
+ * gpu pfn : gpu page size : flags
+ *
+ * gpu pfn and gpu page size are printed in hex format.
+ * flags is one of the characters below:
+ * R: reserved, this GPU page is reserved and not available for use.
+ * P: pending for reserve, this GPU page is marked as bad and will be
+ * reserved in the next window of page_reserve.
+ * F: unable to reserve, this GPU page can't be reserved for some reason.
+ *
+ * Examples:
+ * 0x00000001 : 0x00001000 : R
+ * 0x00000002 : 0x00001000 : P
+ */
+
+static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
+ struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t ppos, size_t count)
+{
+ struct amdgpu_ras *con =
+ container_of(attr, struct amdgpu_ras, badpages_attr);
+ struct amdgpu_device *adev = con->adev;
+ const unsigned int element_size =
+ sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
+ unsigned int start = div64_ul(ppos + element_size - 1, element_size);
+ unsigned int end = div64_ul(ppos + count - 1, element_size);
+ ssize_t s = 0;
+ struct ras_badpage *bps = NULL;
+ unsigned int bps_count = 0;
+
+ memset(buf, 0, count);
+
+ if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
+ return 0;
+
+ for (; start < end && start < bps_count; start++)
+ s += scnprintf(&buf[s], element_size + 1,
+ "0x%08x : 0x%08x : %1s\n",
+ bps[start].bp,
+ bps[start].size,
+ amdgpu_ras_badpage_flags_str(bps[start].flags));
+
+ kfree(bps);
+
+ return s;
+}
+
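
Given the fixed-width record format described in the DOC block above, a userspace consumer can parse the file with plain scanf. A hedged sketch (the card index is an assumption):

/* Hedged example: parse gpu_vram_bad_pages records ("pfn : size : flag").
 * card0 is an assumption; pick the right card on a real system.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/drm/card0/device/ras/gpu_vram_bad_pages";
	unsigned int pfn, size;
	char flag;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	/* each record is "gpu pfn : gpu page size : flag", values in hex */
	while (fscanf(f, "%x : %x : %c", &pfn, &size, &flag) == 3)
		printf("pfn 0x%08x size 0x%08x state %c\n", pfn, size, flag);
	fclose(f);
	return 0;
}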
static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -731,9 +845,14 @@ static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
&con->features_attr.attr,
NULL
};
+ struct bin_attribute *bin_attrs[] = {
+ &con->badpages_attr,
+ NULL
+ };
struct attribute_group group = {
.name = "ras",
.attrs = attrs,
+ .bin_attrs = bin_attrs,
};
con->features_attr = (struct device_attribute) {
@@ -743,7 +862,19 @@ static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
},
.show = amdgpu_ras_sysfs_features_read,
};
+
+ con->badpages_attr = (struct bin_attribute) {
+ .attr = {
+ .name = "gpu_vram_bad_pages",
+ .mode = S_IRUGO,
+ },
+ .size = 0,
+ .private = NULL,
+ .read = amdgpu_ras_sysfs_badpages_read,
+ };
+
sysfs_attr_init(attrs[0]);
+ sysfs_bin_attr_init(bin_attrs[0]);
return sysfs_create_group(&adev->dev->kobj, &group);
}
@@ -755,9 +886,14 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
&con->features_attr.attr,
NULL
};
+ struct bin_attribute *bin_attrs[] = {
+ &con->badpages_attr,
+ NULL
+ };
struct attribute_group group = {
.name = "ras",
.attrs = attrs,
+ .bin_attrs = bin_attrs,
};
sysfs_remove_group(&adev->dev->kobj, &group);
@@ -1089,6 +1225,53 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
/* ih end */
/* recovery begin */
+
+/* Return 0 on success.
+ * The caller must free bps.
+ */
+static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage **bps, unsigned int *count)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ int i = 0;
+ int ret = 0;
+
+ if (!con || !con->eh_data || !bps || !count)
+ return -EINVAL;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data || data->count == 0) {
+ *bps = NULL;
+ goto out;
+ }
+
+ *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
+ if (!*bps) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (; i < data->count; i++) {
+ (*bps)[i] = (struct ras_badpage){
+ .bp = data->bps[i].bp,
+ .size = AMDGPU_GPU_PAGE_SIZE,
+ .flags = 0,
+ };
+
+ if (data->last_reserved <= i)
+ (*bps)[i].flags = 1;
+ else if (data->bps[i].bo == NULL)
+ (*bps)[i].flags = 2;
+ }
+
+ *count = data->count;
+out:
+ mutex_unlock(&con->recovery_lock);
+ return ret;
+}
+
static void amdgpu_ras_do_recovery(struct work_struct *work)
{
struct amdgpu_ras *ras =
@@ -1340,6 +1523,19 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
}
/* recovery end */
+/* Return 0 if ras will reset the gpu and repost. */
+int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
+ unsigned int block)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!ras)
+ return -EINVAL;
+
+ ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
+ return 0;
+}
+
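
A hypothetical user of this helper: an IP block's RAS init path could flag that a reset is needed before features are enabled. Sketch only; the caller and the block choice are assumptions:

/* Sketch: request a RAS-driven reset during boot from an IP block.
 * AMDGPU_RAS_BLOCK__GFX is one of the existing ras block IDs.
 */
static void example_request_ras_reset(struct amdgpu_device *adev)
{
	if (amdgpu_ras_request_reset_on_boot(adev, AMDGPU_RAS_BLOCK__GFX))
		dev_warn(adev->dev, "no ras context, reset not requested\n");
}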
/*
* check hardware's ras ability which will be saved in hw_supported.
* if hardware does not support ras, we can skip some ras initialization and
@@ -1415,8 +1611,10 @@ recovery_out:
return -EINVAL;
}
-/* do some init work after IP late init as dependence */
-void amdgpu_ras_post_init(struct amdgpu_device *adev)
+/* Do some init work after IP late init, as a dependency.
+ * It runs in the resume, gpu reset and boot-up cases.
+ */
+void amdgpu_ras_resume(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj, *tmp;
@@ -1444,6 +1642,32 @@ void amdgpu_ras_post_init(struct amdgpu_device *adev)
}
}
}
+
+ if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
+ con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
+		/* Set up the ras obj state as disabled.
+		 * This covers the init_by_vbios case.
+		 * If we want to enable ras, just enable it in a normal way.
+		 * If we want to disable it, we need to set up the ras obj as
+		 * enabled, then issue another TA disable cmd.
+		 * See feature_enable_on_boot.
+		 */
+ amdgpu_ras_disable_all_features(adev, 1);
+ amdgpu_ras_reset_gpu(adev, 0);
+ }
+}
+
+void amdgpu_ras_suspend(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return;
+
+ amdgpu_ras_disable_all_features(adev, 0);
+ /* Make sure all ras objects are disabled. */
+ if (con->features)
+ amdgpu_ras_disable_all_features(adev, 1);
}
/* do some fini work before IP fini as dependence */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 24c6e5fcda86..94c652f5265a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -93,6 +93,7 @@ struct amdgpu_ras {
struct dentry *ent;
/* sysfs */
struct device_attribute features_attr;
+ struct bin_attribute badpages_attr;
/* block array */
struct ras_manager *objs;
@@ -177,6 +178,12 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
return ras && (ras->supported & (1 << block));
}
+int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
+ unsigned int block);
+
+void amdgpu_ras_resume(struct amdgpu_device *adev);
+void amdgpu_ras_suspend(struct amdgpu_device *adev);
+
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
bool is_ce);
@@ -189,13 +196,10 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
bool is_baco)
{
- /* remove me when gpu reset works on vega20 A1. */
-#if 0
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
schedule_work(&ras->recovery_work);
-#endif
return 0;
}
@@ -257,7 +261,6 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
/* called in ip_init and ip_fini */
int amdgpu_ras_init(struct amdgpu_device *adev);
-void amdgpu_ras_post_init(struct amdgpu_device *adev);
int amdgpu_ras_fini(struct amdgpu_device *adev);
int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 8f5026c123ef..ee440fe29b91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -28,8 +28,9 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/debugfs.h>
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index d7fae2676269..cdddce938bf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -114,6 +114,7 @@ struct amdgpu_ring_funcs {
uint32_t align_mask;
u32 nop;
bool support_64bit_ptrs;
+ bool no_user_fence;
unsigned vmhub;
unsigned extra_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index bfaf5c6323be..0bd1d4ffc19e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -41,7 +41,7 @@
* If we are asked to block we wait on all the oldest fence of all
* rings. We just wait for any of those fence to complete.
*/
-#include <drm/drmP.h>
+
#include "amdgpu.h"
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 639297250c21..c799691dfa84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -23,8 +23,11 @@
*/
#include <linux/fdtable.h>
+#include <linux/file.h>
#include <linux/pid.h>
+
#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_vm.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index 2a1a0c734bdd..12299fd95691 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -25,7 +25,10 @@
#ifndef __AMDGPU_SCHED_H__
#define __AMDGPU_SCHED_H__
-#include <drm/drmP.h>
+enum drm_sched_priority;
+
+struct drm_device;
+struct drm_file;
enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 115bb0c99b0f..a425329d1897 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_sdma.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 2d6f5ec77a68..9828f3c7c655 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -28,7 +28,6 @@
* Christian König <christian.koenig@amd.com>
*/
-#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 8904e62dca7a..b66d29d5ffa2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -22,7 +22,7 @@
*
* Authors: Michel Dänzer
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d3ca2424b5fe..77674a7b9616 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -28,8 +28,6 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-#include <drm/drmP.h>
-
#undef TRACE_SYSTEM
#define TRACE_SYSTEM amdgpu
#define TRACE_INCLUDE_FILE amdgpu_trace
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
index f212402570a5..57c6c39ba064 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
@@ -21,7 +21,7 @@
*
* Author : Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0c52d1f9fe0f..d81bebf76310 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -29,20 +29,26 @@
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
*/
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/hmm.h>
+#include <linux/pagemap.h>
+#include <linux/sched/task.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/swiotlb.h>
+
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/swiotlb.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-#include <linux/debugfs.h>
-#include <linux/iommu.h>
+
#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
@@ -703,143 +709,191 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
/*
* TTM backend functions.
*/
-struct amdgpu_ttm_gup_task_list {
- struct list_head list;
- struct task_struct *task;
-};
-
struct amdgpu_ttm_tt {
struct ttm_dma_tt ttm;
u64 offset;
uint64_t userptr;
struct task_struct *usertask;
uint32_t userflags;
- spinlock_t guptasklock;
- struct list_head guptasks;
- atomic_t mmu_invalidations;
- uint32_t last_set_pages;
+#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
+ struct hmm_range *ranges;
+ int nr_ranges;
+#endif
};
/**
- * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
- * pointer to memory
+ * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
+ * memory and start HMM tracking of CPU page table updates
*
- * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
- * This provides a wrapper around the get_user_pages() call to provide
- * device accessible pages that back user memory.
+ * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once and
+ * only once afterwards to stop HMM tracking.
*/
+#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
+
+/* Userptr pages may cross at most 16 VMAs */
+#define MAX_NR_VMAS (16)
+
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct mm_struct *mm = gtt->usertask->mm;
- unsigned int flags = 0;
- unsigned pinned = 0;
- int r;
+ unsigned long start = gtt->userptr;
+ unsigned long end = start + ttm->num_pages * PAGE_SIZE;
+ struct vm_area_struct *vma = NULL, *vmas[MAX_NR_VMAS];
+ struct hmm_range *ranges;
+ unsigned long nr_pages, i;
+ uint64_t *pfns, f;
+ int r = 0;
if (!mm) /* Happens during process shutdown */
return -ESRCH;
- if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
- flags |= FOLL_WRITE;
-
down_read(&mm->mmap_sem);
- if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
- /*
- * check that we only use anonymous memory to prevent problems
- * with writeback
- */
- unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
- struct vm_area_struct *vma;
+ /* user pages may cross multiple VMAs */
+ gtt->nr_ranges = 0;
+ do {
+ unsigned long vm_start;
- vma = find_vma(mm, gtt->userptr);
- if (!vma || vma->vm_file || vma->vm_end < end) {
- up_read(&mm->mmap_sem);
- return -EPERM;
+ if (gtt->nr_ranges >= MAX_NR_VMAS) {
+ DRM_ERROR("Too many VMAs in userptr range\n");
+ r = -EFAULT;
+ goto out;
+ }
+
+ vm_start = vma ? vma->vm_end : start;
+ vma = find_vma(mm, vm_start);
+ if (unlikely(!vma || vm_start < vma->vm_start)) {
+ r = -EFAULT;
+ goto out;
}
+ vmas[gtt->nr_ranges++] = vma;
+ } while (end > vma->vm_end);
+
+ DRM_DEBUG_DRIVER("0x%lx nr_ranges %d pages 0x%lx\n",
+ start, gtt->nr_ranges, ttm->num_pages);
+
+ if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
+ vmas[0]->vm_file)) {
+ r = -EPERM;
+ goto out;
}
- /* loop enough times using contiguous pages of memory */
- do {
- unsigned num_pages = ttm->num_pages - pinned;
- uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
- struct page **p = pages + pinned;
- struct amdgpu_ttm_gup_task_list guptask;
+ ranges = kvmalloc_array(gtt->nr_ranges, sizeof(*ranges), GFP_KERNEL);
+ if (unlikely(!ranges)) {
+ r = -ENOMEM;
+ goto out;
+ }
- guptask.task = current;
- spin_lock(&gtt->guptasklock);
- list_add(&guptask.list, &gtt->guptasks);
- spin_unlock(&gtt->guptasklock);
+ pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
+ if (unlikely(!pfns)) {
+ r = -ENOMEM;
+ goto out_free_ranges;
+ }
- if (mm == current->mm)
- r = get_user_pages(userptr, num_pages, flags, p, NULL);
- else
- r = get_user_pages_remote(gtt->usertask,
- mm, userptr, num_pages,
- flags, p, NULL, NULL);
+ for (i = 0; i < gtt->nr_ranges; i++)
+ amdgpu_hmm_init_range(&ranges[i]);
- spin_lock(&gtt->guptasklock);
- list_del(&guptask.list);
- spin_unlock(&gtt->guptasklock);
+ f = ranges[0].flags[HMM_PFN_VALID];
+ f |= amdgpu_ttm_tt_is_readonly(ttm) ?
+ 0 : ranges[0].flags[HMM_PFN_WRITE];
+ memset64(pfns, f, ttm->num_pages);
- if (r < 0)
- goto release_pages;
+ for (nr_pages = 0, i = 0; i < gtt->nr_ranges; i++) {
+ ranges[i].vma = vmas[i];
+ ranges[i].start = max(start, vmas[i]->vm_start);
+ ranges[i].end = min(end, vmas[i]->vm_end);
+ ranges[i].pfns = pfns + nr_pages;
+ nr_pages += (ranges[i].end - ranges[i].start) / PAGE_SIZE;
- pinned += r;
+ r = hmm_vma_fault(&ranges[i], true);
+ if (unlikely(r))
+ break;
+ }
+ if (unlikely(r)) {
+ while (i--)
+ hmm_vma_range_done(&ranges[i]);
- } while (pinned < ttm->num_pages);
+ goto out_free_pfns;
+ }
up_read(&mm->mmap_sem);
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ pages[i] = hmm_pfn_to_page(&ranges[0], pfns[i]);
+ if (!pages[i]) {
+ pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
+ i, pfns[i]);
+ goto out_invalid_pfn;
+ }
+ }
+ gtt->ranges = ranges;
+
return 0;
-release_pages:
- release_pages(pages, pinned);
+out_free_pfns:
+ kvfree(pfns);
+out_free_ranges:
+ kvfree(ranges);
+out:
up_read(&mm->mmap_sem);
+
return r;
+
+out_invalid_pfn:
+ for (i = 0; i < gtt->nr_ranges; i++)
+ hmm_vma_range_done(&ranges[i]);
+ kvfree(pfns);
+ kvfree(ranges);
+ return -ENOMEM;
}
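
The pairing contract matters here: each successful amdgpu_ttm_tt_get_user_pages() must be matched by exactly one amdgpu_ttm_tt_get_user_pages_done(), whose return value says whether the pages survived without invalidation. A hedged sketch of the expected pattern (the caller and retry policy are assumptions):

/* Sketch of the get/done pairing; the consuming code is assumed. */
static int example_map_userptr(struct ttm_tt *ttm, struct page **pages)
{
	int r = amdgpu_ttm_tt_get_user_pages(ttm, pages);

	if (r)
		return r;

	/* ... build the mapping that consumes @pages ... */

	/* false means the range was invalidated while we worked */
	if (!amdgpu_ttm_tt_get_user_pages_done(ttm))
		return -EAGAIN; /* caller should restart from get_user_pages */

	return 0;
}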
/**
- * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
+ * amdgpu_ttm_tt_get_user_pages_done - stop HMM from tracking CPU page table
+ * changes, and check if the pages backing this ttm range have been invalidated
*
- * Called by amdgpu_cs_list_validate(). This creates the page list
- * that backs user memory and will ultimately be mapped into the device
- * address space.
+ * Returns: true if pages are still valid
*/
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
+ bool r = false;
+ int i;
- gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
- for (i = 0; i < ttm->num_pages; ++i) {
- if (ttm->pages[i])
- put_page(ttm->pages[i]);
+ if (!gtt || !gtt->userptr)
+ return false;
- ttm->pages[i] = pages ? pages[i] : NULL;
+ DRM_DEBUG_DRIVER("user_pages_done 0x%llx nr_ranges %d pages 0x%lx\n",
+ gtt->userptr, gtt->nr_ranges, ttm->num_pages);
+
+ WARN_ONCE(!gtt->ranges || !gtt->ranges[0].pfns,
+ "No user pages to check\n");
+
+ if (gtt->ranges) {
+ for (i = 0; i < gtt->nr_ranges; i++)
+ r |= hmm_vma_range_done(&gtt->ranges[i]);
+ kvfree(gtt->ranges[0].pfns);
+ kvfree(gtt->ranges);
+ gtt->ranges = NULL;
}
+
+ return r;
}
+#endif
/**
- * amdgpu_ttm_tt_mark_user_page - Mark pages as dirty
+ * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
*
- * Called while unpinning userptr pages
+ * Called by amdgpu_cs_list_validate(). This creates the page list
+ * that backs user memory and will ultimately be mapped into the device
+ * address space.
*/
-void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- struct page *page = ttm->pages[i];
+ unsigned long i;
- if (!page)
- continue;
-
- if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
- set_page_dirty(page);
-
- mark_page_accessed(page);
- }
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i] = pages ? pages[i] : NULL;
}
/**
@@ -901,10 +955,14 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* unmap the pages mapped to the device */
dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- /* mark the pages as dirty */
- amdgpu_ttm_tt_mark_user_pages(ttm);
-
sg_free_table(ttm->sg);
+
+#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
+ if (gtt->ranges &&
+ ttm->pages[0] == hmm_pfn_to_page(&gtt->ranges[0],
+ gtt->ranges[0].pfns[0]))
+		WARN_ONCE(1, "Missing get_user_pages_done\n");
+#endif
}
int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
@@ -1254,11 +1312,6 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
gtt->usertask = current->group_leader;
get_task_struct(gtt->usertask);
- spin_lock_init(&gtt->guptasklock);
- INIT_LIST_HEAD(&gtt->guptasks);
- atomic_set(&gtt->mmu_invalidations, 0);
- gtt->last_set_pages = 0;
-
return 0;
}
@@ -1287,7 +1340,6 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- struct amdgpu_ttm_gup_task_list *entry;
unsigned long size;
if (gtt == NULL || !gtt->userptr)
@@ -1300,48 +1352,20 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
- /* Search the lists of tasks that hold this mapping and see
- * if current is one of them. If it is return false.
- */
- spin_lock(&gtt->guptasklock);
- list_for_each_entry(entry, &gtt->guptasks, list) {
- if (entry->task == current) {
- spin_unlock(&gtt->guptasklock);
- return false;
- }
- }
- spin_unlock(&gtt->guptasklock);
-
- atomic_inc(&gtt->mmu_invalidations);
-
return true;
}
/**
- * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
- */
-bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
- int *last_invalidated)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int prev_invalidated = *last_invalidated;
-
- *last_invalidated = atomic_read(&gtt->mmu_invalidations);
- return prev_invalidated != *last_invalidated;
-}
-
-/**
- * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
- * been invalidated since the last time they've been set?
+ * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
*/
-bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
if (gtt == NULL || !gtt->userptr)
return false;
- return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+ return true;
}
/**
@@ -1753,44 +1777,26 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize various on-chip memory pools */
r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
- adev->gds.mem.total_size);
+ adev->gds.gds_size);
if (r) {
DRM_ERROR("Failed initializing GDS heap.\n");
return r;
}
- r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
- 4, AMDGPU_GEM_DOMAIN_GDS,
- &adev->gds.gds_gfx_bo, NULL, NULL);
- if (r)
- return r;
-
r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
- adev->gds.gws.total_size);
+ adev->gds.gws_size);
if (r) {
DRM_ERROR("Failed initializing gws heap.\n");
return r;
}
- r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
- 1, AMDGPU_GEM_DOMAIN_GWS,
- &adev->gds.gws_gfx_bo, NULL, NULL);
- if (r)
- return r;
-
r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
- adev->gds.oa.total_size);
+ adev->gds.oa_size);
if (r) {
DRM_ERROR("Failed initializing oa heap.\n");
return r;
}
- r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
- 1, AMDGPU_GEM_DOMAIN_OA,
- &adev->gds.oa_gfx_bo, NULL, NULL);
- if (r)
- return r;
-
/* Register debugfs entries for amdgpu_ttm */
r = amdgpu_ttm_debugfs_init(adev);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index b5b2d101f7db..c2b7669004ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -101,9 +101,21 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
+#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
+#else
+static inline int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
+{
+ return -EPERM;
+}
+static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+{
+ return false;
+}
+#endif
+
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
-void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
@@ -112,7 +124,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
-bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 7b33867036e7..524f70f2b52f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -24,7 +24,7 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_ucode.h"
@@ -313,6 +313,69 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
return AMDGPU_FW_LOAD_DIRECT;
}
+#define FW_VERSION_ATTR(name, mode, field) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct drm_device *ddev = dev_get_drvdata(dev); \
+ struct amdgpu_device *adev = ddev->dev_private; \
+ \
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field); \
+} \
+static DEVICE_ATTR(name, mode, show_##name, NULL)
+
+FW_VERSION_ATTR(vce_fw_version, 0444, vce.fw_version);
+FW_VERSION_ATTR(uvd_fw_version, 0444, uvd.fw_version);
+FW_VERSION_ATTR(mc_fw_version, 0444, gmc.fw_version);
+FW_VERSION_ATTR(me_fw_version, 0444, gfx.me_fw_version);
+FW_VERSION_ATTR(pfp_fw_version, 0444, gfx.pfp_fw_version);
+FW_VERSION_ATTR(ce_fw_version, 0444, gfx.ce_fw_version);
+FW_VERSION_ATTR(rlc_fw_version, 0444, gfx.rlc_fw_version);
+FW_VERSION_ATTR(rlc_srlc_fw_version, 0444, gfx.rlc_srlc_fw_version);
+FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
+FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
+FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
+FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
+FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version);
+FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_fw_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_fw_version);
+FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
+FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
+FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
+FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
+FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+
+static struct attribute *fw_attrs[] = {
+ &dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
+ &dev_attr_mc_fw_version.attr, &dev_attr_me_fw_version.attr,
+ &dev_attr_pfp_fw_version.attr, &dev_attr_ce_fw_version.attr,
+ &dev_attr_rlc_fw_version.attr, &dev_attr_rlc_srlc_fw_version.attr,
+ &dev_attr_rlc_srlg_fw_version.attr, &dev_attr_rlc_srls_fw_version.attr,
+ &dev_attr_mec_fw_version.attr, &dev_attr_mec2_fw_version.attr,
+ &dev_attr_sos_fw_version.attr, &dev_attr_asd_fw_version.attr,
+ &dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
+ &dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
+ &dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
+ &dev_attr_dmcu_fw_version.attr, NULL
+};
+
+static const struct attribute_group fw_attr_group = {
+ .name = "fw_version",
+ .attrs = fw_attrs
+};
+
+int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev)
+{
+ return sysfs_create_group(&adev->dev->kobj, &fw_attr_group);
+}
+
+void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev)
+{
+ sysfs_remove_group(&adev->dev->kobj, &fw_attr_group);
+}
+
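
Once registered, each entry shows up as a zero-padded hex string under the device's fw_version group. A minimal reader sketch (the card index is an assumption; the 0x%08x formatting comes from the macro above):

/* Hedged example: read one firmware version attribute. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/drm/card0/device/fw_version/vcn_fw_version";
	unsigned int ver;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "0x%x", &ver) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("VCN firmware version: 0x%08x\n", ver);
	return 0;
}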
static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
struct amdgpu_firmware_info *ucode,
uint64_t mc_addr, void *kptr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 7ac25a1c7853..ec4c2ea1f05a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -291,7 +291,9 @@ bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
int amdgpu_ucode_init_bo(struct amdgpu_device *adev);
int amdgpu_ucode_create_bo(struct amdgpu_device *adev);
+int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev);
void amdgpu_ucode_free_bo(struct amdgpu_device *adev);
+void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev);
enum amdgpu_firmware_load_type
amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 4e5d13e41f6a..5b2fea3b4a2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -30,7 +30,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index f7189e22f6b7..b70b3c45bb29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -27,7 +27,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
#include "amdgpu.h"
@@ -1092,7 +1092,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
for (i = 0; i < timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= timeout)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e6b07ece3910..9f32bf862d94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -26,7 +26,8 @@
#include <linux/firmware.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include <drm/drm.h>
#include "amdgpu.h"
@@ -212,132 +213,6 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
- struct dpg_pause_state *new_state)
-{
- int ret_code;
- uint32_t reg_data = 0;
- uint32_t reg_data2 = 0;
- struct amdgpu_ring *ring;
-
- /* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
- DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
- new_state->fw_based, new_state->jpeg);
-
- reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
- (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
-
- if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
- ret_code = 0;
-
- if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
- UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
-
- if (!ret_code) {
- /* pause DPG non-jpeg */
- reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
- WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
- UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
- UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
-
- /* Restore */
- ring = &adev->vcn.ring_enc[0];
- WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
- WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-
- ring = &adev->vcn.ring_enc[1];
- WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
- WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-
- ring = &adev->vcn.ring_dec;
- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
- UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- }
- } else {
- /* unpause dpg non-jpeg, no need to wait */
- reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
- WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
- }
- adev->vcn.pause_state.fw_based = new_state->fw_based;
- }
-
- /* pause/unpause if state is changed */
- if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
- DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
- new_state->fw_based, new_state->jpeg);
-
- reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
- (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
-
- if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
- ret_code = 0;
-
- if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
- UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
-
- if (!ret_code) {
- /* Make sure JPRG Snoop is disabled before sending the pause */
- reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
- reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
- WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
-
- /* pause DPG jpeg */
- reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
- WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
- UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
- UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
-
- /* Restore */
- ring = &adev->vcn.ring_jpeg;
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
- UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
- UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
- lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
- upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
- WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
- UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
-
- ring = &adev->vcn.ring_dec;
- WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
- UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- }
- } else {
- /* unpause dpg jpeg, no need to wait */
- reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
- WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
- }
- adev->vcn.pause_state.jpeg = new_state->jpeg;
- }
-
- return 0;
-}
-
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@@ -362,7 +237,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
else
new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
- amdgpu_vcn_pause_dpg_mode(adev, &new_state);
+ adev->vcn.pause_dpg_mode(adev, &new_state);
}
fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
@@ -417,7 +292,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
new_state.jpeg = VCN_DPG_STATE__PAUSE;
- amdgpu_vcn_pause_dpg_mode(adev, &new_state);
+ adev->vcn.pause_dpg_mode(adev, &new_state);
}
}
@@ -446,7 +321,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -610,7 +485,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
for (i = 0; i < adev->usec_timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -769,7 +644,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -843,7 +718,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index a0ad19af9080..a1ee19251aae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -45,6 +45,27 @@
#define VCN_ENC_CMD_REG_WRITE 0x0000000b
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
+#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
+ ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+ UVD_DPG_LMA_CTL__MASK_EN_MASK | \
+ ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
+ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); \
+ })
+
+#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \
+ do { \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+ UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
+ ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
+ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ } while (0)
+
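
Both macros go through the UVD_DPG_LMA_CTL/UVD_DPG_LMA_DATA indirect window so registers stay reachable while the block is power gated. A hypothetical read-modify-write, using register names that appear elsewhere in this patch:

/* Sketch: indirect RMW through the DPG LMA window; sram_sel 0 selects
 * the register bus. Assumes the SOC15 helpers available in this driver.
 */
static void example_dpg_rmw(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_POWER_STATUS,
				    0xFFFFFFFF, 0);
	tmp |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_POWER_STATUS, tmp,
			      0xFFFFFFFF, 0);
}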
enum engine_status_constants {
UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
@@ -81,6 +102,8 @@ struct amdgpu_vcn {
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
struct dpg_pause_state pause_state;
+ int (*pause_dpg_mode)(struct amdgpu_device *adev,
+ struct dpg_pause_state *new_state);
};
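
With pause_dpg_mode now a callback, each VCN generation installs its own handler during software init, and the common code in amdgpu_vcn.c calls adev->vcn.pause_dpg_mode() as seen earlier in this patch. A hedged sketch of the wiring (the vcn_v1_0 names are assumptions based on where the removed code would move):

/* Sketch: per-ASIC init installing the DPG pause handler.
 * vcn_v1_0_pause_dpg_mode is assumed to hold the logic removed from
 * amdgpu_vcn.c in this patch.
 */
static void example_vcn_v1_0_setup(struct amdgpu_device *adev)
{
	adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
}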
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 7d484fad3909..07a7e3820b7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -21,6 +21,10 @@
*
*/
+#include <linux/module.h>
+
+#include <drm/drm_drv.h>
+
#include "amdgpu.h"
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
@@ -426,3 +430,47 @@ uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
return clk;
}
+void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->init_reg_access_mode)
+ virt->ops->init_reg_access_mode(adev);
+}
+
+bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev)
+{
+ bool ret = false;
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (amdgpu_sriov_vf(adev)
+ && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH))
+ ret = true;
+
+ return ret;
+}
+
+bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev)
+{
+ bool ret = false;
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (amdgpu_sriov_vf(adev)
+ && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC)
+ && !(amdgpu_sriov_runtime(adev)))
+ ret = true;
+
+ return ret;
+}
+
+bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev)
+{
+ bool ret = false;
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (amdgpu_sriov_vf(adev)
+	    && (virt->reg_access_mode & AMDGPU_VIRT_REG_SKIP_SETTING))
+ ret = true;
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 584947b7ccf3..dca25deee75c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -48,6 +48,12 @@ struct amdgpu_vf_error_buffer {
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
+/* Depending on the firmware features, some new reg access modes are supported */
+#define AMDGPU_VIRT_REG_ACCESS_LEGACY		(1 << 0) /* direct MMIO */
+#define AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH	(1 << 1) /* by PSP */
+#define AMDGPU_VIRT_REG_ACCESS_RLC		(1 << 2) /* by RLC */
+#define AMDGPU_VIRT_REG_SKIP_SETTING		(1 << 3) /* skip setting reg */
+
/**
* struct amdgpu_virt_ops - amdgpu device virt operations
*/
@@ -59,6 +65,7 @@ struct amdgpu_virt_ops {
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
+ void (*init_reg_access_mode)(struct amdgpu_device *adev);
};
/*
@@ -258,6 +265,7 @@ struct amdgpu_virt {
uint32_t gim_feature;
/* protect DPM events to GIM */
struct mutex dpm_mutex;
+ uint32_t reg_access_mode;
};
#define amdgpu_sriov_enabled(adev) \
@@ -307,4 +315,9 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
+void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev);
+bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev);
+bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev);
+bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4f10f5aba00b..e44f9dd202e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -28,7 +28,7 @@
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index ec9ea3fdbb4a..8abc9b6892ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -22,7 +22,6 @@
* Authors: Christian König
*/
-#include <drm/drmP.h>
#include "amdgpu.h"
struct amdgpu_vram_mgr {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index a48c84c51775..d11eba09eadd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -40,6 +40,34 @@ void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
return &hive->device_list;
}
+/**
+ * DOC: AMDGPU XGMI Support
+ *
+ * XGMI is a high speed interconnect that joins multiple GPU cards
+ * into a homogeneous memory space that is organized by a collective
+ * hive ID and individual node IDs, both of which are 64-bit numbers.
+ *
+ * The file xgmi_device_id contains the unique per GPU device ID and
+ * is stored in the /sys/class/drm/card${cardno}/device/ directory.
+ *
+ * Inside the device directory a sub-directory 'xgmi_hive_info' is
+ * created which contains the hive ID and the list of nodes.
+ *
+ * The hive ID is stored in:
+ * /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
+ *
+ * The node information is stored in numbered directories:
+ * /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
+ *
+ * Each device has its own xgmi_hive_info directory with a mirrored
+ * set of node sub-directories.
+ *
+ * The XGMI memory space is built by contiguously appending each node's
+ * power-of-two padded VRAM space to the others.
+ *
+ */
+
+
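A userspace sketch that walks the layout documented above (the card number is an assumption; the exact value formatting is left to the kernel, so the raw line is printed as-is):

/* Hedged example: dump the XGMI hive and device IDs. */
#include <stdio.h>

static void dump(const char *path)
{
	char line[64];
	FILE *f = fopen(path, "r");

	if (f && fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	if (f)
		fclose(f);
}

int main(void)
{
	dump("/sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id");
	dump("/sys/class/drm/card0/device/xgmi_device_id");
	return 0;
}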
static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -238,7 +266,7 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
/* Each psp needs to set the latest topology */
ret = psp_xgmi_set_topology_info(&adev->psp,
hive->number_devices,
- &hive->topology_info);
+ &adev->psp.xgmi_context.top_info);
if (ret)
dev_err(adev->dev,
"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
@@ -248,9 +276,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
return ret;
}
+
+int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev)
+{
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ for (i = 0 ; i < top->num_nodes; ++i)
+ if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
+ return top->nodes[i].num_hops;
+ return -EINVAL;
+}
+
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
- struct psp_xgmi_topology_info *hive_topology;
+ struct psp_xgmi_topology_info *top_info;
struct amdgpu_hive_info *hive;
struct amdgpu_xgmi *entry;
struct amdgpu_device *tmp_adev = NULL;
@@ -283,35 +324,46 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit;
}
- hive_topology = &hive->topology_info;
+ top_info = &adev->psp.xgmi_context.top_info;
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
list_for_each_entry(entry, &hive->device_list, head)
- hive_topology->nodes[count++].node_id = entry->node_id;
+ top_info->nodes[count++].node_id = entry->node_id;
+ top_info->num_nodes = count;
hive->number_devices = count;
- /* Each psp need to get the latest topology */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
+		/* update the node list for the other devices in the hive */
+ if (tmp_adev != adev) {
+ top_info = &tmp_adev->psp.xgmi_context.top_info;
+ top_info->nodes[count - 1].node_id = adev->gmc.xgmi.node_id;
+ top_info->num_nodes = count;
+ }
+ ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ if (ret)
+ goto exit;
+ }
+
+ /* get latest topology info for each device from psp */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
+ &tmp_adev->psp.xgmi_context.top_info);
if (ret) {
dev_err(tmp_adev->dev,
"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
tmp_adev->gmc.xgmi.node_id,
tmp_adev->gmc.xgmi.hive_id, ret);
/* To do : continue with some node failed or disable the whole hive */
- break;
+ goto exit;
}
}
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
- if (ret)
- break;
- }
-
if (!ret)
ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
+
+ mutex_unlock(&hive->hive_lock);
+exit:
if (!ret)
dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
@@ -320,9 +372,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
ret);
-
- mutex_unlock(&hive->hive_lock);
-exit:
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 3e9c91e9a4bf..fbcee31788c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -27,7 +27,6 @@
struct amdgpu_hive_info {
uint64_t hive_id;
struct list_head device_list;
- struct psp_xgmi_topology_info topology_info;
int number_devices;
struct mutex hive_lock, reset_lock;
struct kobject *kobj;
@@ -41,6 +40,8 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
+int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index a39170991afe..4205bbe5d8d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -26,7 +26,8 @@
#define ATOM_H
#include <linux/types.h>
-#include <drm/drmP.h>
+
+struct drm_device;
#define ATOM_BIOS_MAGIC 0xAA55
#define ATOM_ATI_MAGIC_PTR 0x30
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 8a0818b23ea4..213e62a28ba0 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -23,7 +23,7 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_fixed.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index f81068ba4cc6..6858cde9fc5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -24,7 +24,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 60e2447e12c5..1e94a9b652f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -23,7 +23,9 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index f9b2ce9a98f3..980c363b1a0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 07c1f239e9c3..1ffbc0d3d7a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -24,7 +24,8 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -1804,6 +1805,18 @@ static bool cik_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+static uint64_t cik_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+ uint64_t nak_r, nak_g;
+
+ /* Get the number of NAKs received and generated */
+ nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
+ nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
+
+	/* Add the total number of NAKs, i.e. the number of replays */
+ return (nak_r + nak_g);
+}
+
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -1821,6 +1834,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.init_doorbell_index = &legacy_doorbell_index_init,
.get_pcie_usage = &cik_get_pcie_usage,
.need_reset_on_init = &cik_need_reset_on_init,
+ .get_pcie_replay_count = &cik_get_pcie_replay_count,
};
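/*
 * Hypothetical usage sketch (not part of the patch; the helper name is
 * invented): the new hook is reached through the per-ASIC function table
 * registered above.
 */
static uint64_t example_pcie_replay_count(struct amdgpu_device *adev)
{
	/* total replays on the link = NAKs received + NAKs generated */
	return adev->asic_funcs->get_pcie_replay_count(adev);
}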
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 721c757156e8..401c99f0b2d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -20,7 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "cikd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index d42808b05971..c45304f1047c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -21,8 +21,10 @@
*
* Authors: Alex Deucher
*/
+
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
@@ -640,7 +642,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 61024b9c7a4b..1dca0cabc326 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -20,7 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 1f0426d2fc2a..1ffd1963e765 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -20,7 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 2280b971d758..9e0782b54066 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -20,7 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index bea32f076b91..4bf453e07dca 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -20,7 +20,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 13da915991dd..b23418ca8f6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -20,7 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index e4cc1d48eaab..3026298da7eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -20,7 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_vblank.h>
+
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index d5ebe566809b..8c09bf994acd 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -105,6 +105,431 @@ static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
*flags |= AMD_CG_SUPPORT_DF_MGCG;
}
+/* holds counter assignment per gpu struct */
+struct df_v3_6_event_mask {
+ struct amdgpu_device gpu;
+ uint64_t config_assign_mask[AMDGPU_DF_MAX_COUNTERS];
+};
+
+/* get assigned df perfmon ctr as int */
+static void df_v3_6_pmc_config_2_cntr(struct amdgpu_device *adev,
+ uint64_t config,
+ int *counter)
+{
+ struct df_v3_6_event_mask *mask;
+ int i;
+
+ mask = container_of(adev, struct df_v3_6_event_mask, gpu);
+
+ for (i = 0; i < AMDGPU_DF_MAX_COUNTERS; i++) {
+ if ((config & 0x0FFFFFFUL) == mask->config_assign_mask[i]) {
+ *counter = i;
+ return;
+ }
+ }
+}
+
+/* get address based on counter assignment */
+static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
+ uint64_t config,
+ int is_ctrl,
+ uint32_t *lo_base_addr,
+ uint32_t *hi_base_addr)
+{
+
+ int target_cntr = -1;
+
+ df_v3_6_pmc_config_2_cntr(adev, config, &target_cntr);
+
+ if (target_cntr < 0)
+ return;
+
+ switch (target_cntr) {
+
+ case 0:
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+ break;
+ case 1:
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+ break;
+ case 2:
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+ break;
+ case 3:
+ *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
+ *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+ break;
+
+ }
+
+}
+
+/* get read counter address */
+static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
+ uint64_t config,
+ uint32_t *lo_base_addr,
+ uint32_t *hi_base_addr)
+{
+ df_v3_6_pmc_get_addr(adev, config, 0, lo_base_addr, hi_base_addr);
+}
+
+/* get control counter settings, i.e. address and values to set */
+static void df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
+ uint64_t config,
+ uint32_t *lo_base_addr,
+ uint32_t *hi_base_addr,
+ uint32_t *lo_val,
+ uint32_t *hi_val)
+{
+
+ uint32_t eventsel, instance, unitmask;
+ uint32_t es_5_0, es_13_0, es_13_6, es_13_12, es_11_8, es_7_0;
+
+ df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr);
+
+ if (lo_val == NULL || hi_val == NULL)
+ return;
+
+ if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
+ DRM_ERROR("DF PMC addressing not retrieved! Lo: %x, Hi: %x",
+ *lo_base_addr, *hi_base_addr);
+ return;
+ }
+
+ eventsel = GET_EVENT(config);
+ instance = GET_INSTANCE(config);
+ unitmask = GET_UNITMASK(config);
+
+ es_5_0 = eventsel & 0x3FUL;
+ es_13_6 = instance;
+ es_13_0 = (es_13_6 << 6) + es_5_0;
+ es_13_12 = (es_13_0 & 0x03000UL) >> 12;
+ es_11_8 = (es_13_0 & 0x0F00UL) >> 8;
+ es_7_0 = es_13_0 & 0x0FFUL;
+ *lo_val = (es_7_0 & 0xFFUL) | ((unitmask & 0x0FUL) << 8);
+ *hi_val = (es_11_8 | ((es_13_12)<<(29)));
+}
+
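/*
 * Worked example (standalone; not part of the patch): the event-select
 * packing above applied to the xgmi link-0 tx config (event 0x07,
 * instance 0x46, unitmask 0x02); prints lo_val=0x287 hi_val=0x20000001.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t config = 0x07ULL | (0x46ULL << 8) | (0x02ULL << 16);
	uint32_t eventsel = config & 0xFF;
	uint32_t instance = (config >> 8) & 0xFF;
	uint32_t unitmask = (config >> 16) & 0xFF;
	uint32_t es_13_0 = (instance << 6) + (eventsel & 0x3F);
	uint32_t lo_val = (es_13_0 & 0xFF) | ((unitmask & 0xF) << 8);
	uint32_t hi_val = ((es_13_0 & 0xF00) >> 8) |
			  (((es_13_0 & 0x3000) >> 12) << 29);

	printf("lo_val=0x%x hi_val=0x%x\n", lo_val, hi_val);
	return 0;
}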
+/* assign df performance counters for read */
+static int df_v3_6_pmc_assign_cntr(struct amdgpu_device *adev,
+ uint64_t config,
+ int *is_assigned)
+{
+
+ struct df_v3_6_event_mask *mask;
+ int i, target_cntr;
+
+ target_cntr = -1;
+
+ *is_assigned = 0;
+
+ df_v3_6_pmc_config_2_cntr(adev, config, &target_cntr);
+
+ if (target_cntr >= 0) {
+ *is_assigned = 1;
+ return 0;
+ }
+
+ mask = container_of(adev, struct df_v3_6_event_mask, gpu);
+
+ for (i = 0; i < AMDGPU_DF_MAX_COUNTERS; i++) {
+ if (mask->config_assign_mask[i] == 0ULL) {
+ mask->config_assign_mask[i] = config & 0x0FFFFFFUL;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/* release performance counter */
+static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
+ uint64_t config)
+{
+
+ struct df_v3_6_event_mask *mask;
+ int target_cntr;
+
+ target_cntr = -1;
+
+ df_v3_6_pmc_config_2_cntr(adev, config, &target_cntr);
+
+ mask = container_of(adev, struct df_v3_6_event_mask, gpu);
+
+ if (target_cntr >= 0)
+ mask->config_assign_mask[target_cntr] = 0ULL;
+
+}
+
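/*
 * Standalone model (not part of the patch): the assign/lookup/release
 * lifecycle above with the same 4-slot table and 24-bit config key; a
 * zero entry marks a free counter, exactly as in the driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define NCTR 4

static uint64_t slot[NCTR];	/* zero means unassigned */

static int assign(uint64_t config)
{
	uint64_t key = config & 0xFFFFFF;
	int i;

	for (i = 0; i < NCTR; i++)
		if (slot[i] == key)
			return i;	/* already assigned */
	for (i = 0; i < NCTR; i++)
		if (slot[i] == 0) {
			slot[i] = key;	/* newly assigned */
			return i;
		}
	return -1;	/* all counters busy, stands in for -ENOSPC */
}

static void release(uint64_t config)
{
	uint64_t key = config & 0xFFFFFF;
	int i;

	for (i = 0; i < NCTR; i++)
		if (slot[i] == key)
			slot[i] = 0;
}

int main(void)
{
	uint64_t cfg = 0x024607;	/* xgmi link-0 tx */

	printf("assigned counter %d\n", assign(cfg));
	release(cfg);
	return 0;
}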
+/*
+ * get xgmi link counters via programmable data fabric (df) counters (max 4)
+ * using the cake tx event.
+ *
+ * @adev -> amdgpu device
+ * @instance -> currently cake has 2 links to poll on vega20
+ * @count -> pointer used to return the counter value
+ *
+ */
+
+static void df_v3_6_get_xgmi_link_cntr(struct amdgpu_device *adev,
+ int instance,
+ uint64_t *count)
+{
+ uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+ uint64_t config;
+
+ config = GET_INSTANCE_CONFIG(instance);
+
+ df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
+ &hi_base_addr);
+
+ if ((lo_base_addr == 0) || (hi_base_addr == 0))
+ return;
+
+ lo_val = RREG32_PCIE(lo_base_addr);
+ hi_val = RREG32_PCIE(hi_base_addr);
+
+ *count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
+}
+
+/*
+ * reset xgmi link counters
+ *
+ * @adev -> amdgpu device
+ * @instance -> currently cake has 2 links to poll on vega20
+ *
+ */
+static void df_v3_6_reset_xgmi_link_cntr(struct amdgpu_device *adev,
+ int instance)
+{
+ uint32_t lo_base_addr, hi_base_addr;
+ uint64_t config;
+
+ config = 0ULL | (0x7ULL) | ((0x46ULL + instance) << 8) | (0x2 << 16);
+
+ df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
+ &hi_base_addr);
+
+ if ((lo_base_addr == 0) || (hi_base_addr == 0))
+ return;
+
+ WREG32_PCIE(lo_base_addr, 0UL);
+ WREG32_PCIE(hi_base_addr, 0UL);
+}
+
+/*
+ * add xgmi link counters
+ *
+ * @adev -> amdgpu device
+ * @instance -> currently cake has 2 links to poll on vega20
+ *
+ */
+
+static int df_v3_6_add_xgmi_link_cntr(struct amdgpu_device *adev,
+ int instance)
+{
+ uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+ uint64_t config;
+ int ret, is_assigned;
+
+ if (instance < 0 || instance > 1)
+ return -EINVAL;
+
+ config = GET_INSTANCE_CONFIG(instance);
+
+ ret = df_v3_6_pmc_assign_cntr(adev, config, &is_assigned);
+
+ if (ret || is_assigned)
+ return ret;
+
+ df_v3_6_pmc_get_ctrl_settings(adev,
+ config,
+ &lo_base_addr,
+ &hi_base_addr,
+ &lo_val,
+ &hi_val);
+
+ WREG32_PCIE(lo_base_addr, lo_val);
+ WREG32_PCIE(hi_base_addr, hi_val);
+
+ return ret;
+}
+
+
+/*
+ * start xgmi link counters
+ *
+ * @adev -> amdgpu device
+ * @instance -> currently cake has 2 links to poll on vega20
+ * @is_enable -> either resume or assign event via df perfmon
+ *
+ */
+
+static int df_v3_6_start_xgmi_link_cntr(struct amdgpu_device *adev,
+ int instance,
+ int is_enable)
+{
+ uint32_t lo_base_addr, hi_base_addr, lo_val;
+ uint64_t config;
+ int ret;
+
+ if (instance < 0 || instance > 1)
+ return -EINVAL;
+
+ if (is_enable) {
+
+ ret = df_v3_6_add_xgmi_link_cntr(adev, instance);
+
+ if (ret)
+ return ret;
+
+ } else {
+
+ config = GET_INSTANCE_CONFIG(instance);
+
+ df_v3_6_pmc_get_ctrl_settings(adev,
+ config,
+ &lo_base_addr,
+ &hi_base_addr,
+ NULL,
+ NULL);
+
+ if (lo_base_addr == 0)
+ return -EINVAL;
+
+ lo_val = RREG32_PCIE(lo_base_addr);
+
+ WREG32_PCIE(lo_base_addr, lo_val | (1ULL << 22));
+
+ ret = 0;
+ }
+
+ return ret;
+
+}
+
+/*
+ * stop xgmi link counters
+ *
+ * @adev -> amdgpu device
+ * @instance -> currently cake has 2 links to poll on vega20
+ * @is_disable -> either pause or unassign event via df perfmon
+ *
+ */
+
+static int df_v3_6_stop_xgmi_link_cntr(struct amdgpu_device *adev,
+ int instance,
+ int is_disable)
+{
+
+ uint32_t lo_base_addr, hi_base_addr, lo_val;
+ uint64_t config;
+
+ config = GET_INSTANCE_CONFIG(instance);
+
+ if (is_disable) {
+ df_v3_6_reset_xgmi_link_cntr(adev, instance);
+ df_v3_6_pmc_release_cntr(adev, config);
+ } else {
+
+ df_v3_6_pmc_get_ctrl_settings(adev,
+ config,
+ &lo_base_addr,
+ &hi_base_addr,
+ NULL,
+ NULL);
+
+ if ((lo_base_addr == 0) || (hi_base_addr == 0))
+ return -EINVAL;
+
+ lo_val = RREG32_PCIE(lo_base_addr);
+
+ WREG32_PCIE(lo_base_addr, lo_val & ~(1ULL << 22));
+ }
+
+ return 0;
+}
+
+static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
+ int is_enable)
+{
+ int xgmi_tx_link, ret = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ xgmi_tx_link = IS_DF_XGMI_0_TX(config) ? 0
+ : (IS_DF_XGMI_1_TX(config) ? 1 : -1);
+
+ if (xgmi_tx_link >= 0)
+ ret = df_v3_6_start_xgmi_link_cntr(adev, xgmi_tx_link,
+ is_enable);
+
+ if (ret)
+ return ret;
+
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
+ int is_disable)
+{
+ int xgmi_tx_link, ret = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ xgmi_tx_link = IS_DF_XGMI_0_TX(config) ? 0
+ : (IS_DF_XGMI_1_TX(config) ? 1 : -1);
+
+ if (xgmi_tx_link >= 0) {
+ ret = df_v3_6_stop_xgmi_link_cntr(adev,
+ xgmi_tx_link,
+ is_disable);
+ if (ret)
+ return ret;
+ }
+
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
+ uint64_t config,
+ uint64_t *count)
+{
+
+ int xgmi_tx_link;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ xgmi_tx_link = IS_DF_XGMI_0_TX(config) ? 0
+ : (IS_DF_XGMI_1_TX(config) ? 1 : -1);
+
+ if (xgmi_tx_link >= 0) {
+ df_v3_6_reset_xgmi_link_cntr(adev, xgmi_tx_link);
+ df_v3_6_get_xgmi_link_cntr(adev, xgmi_tx_link, count);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+}
+
const struct amdgpu_df_funcs df_v3_6_funcs = {
.init = df_v3_6_init,
.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
@@ -113,4 +538,7 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
.update_medium_grain_clock_gating =
df_v3_6_update_medium_grain_clock_gating,
.get_clockgating_state = df_v3_6_get_clockgating_state,
+ .pmc_start = df_v3_6_pmc_start,
+ .pmc_stop = df_v3_6_pmc_stop,
+ .pmc_get_count = df_v3_6_pmc_get_count
};
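/*
 * Hypothetical usage sketch (not part of the patch; the df_funcs field
 * name is assumed from the surrounding driver, error handling elided):
 * driving the new pmc hooks for the xgmi link-0 tx event.
 */
static void sample_xgmi0_tx(struct amdgpu_device *adev, uint64_t *count)
{
	uint64_t config = 0x07ULL | (0x46ULL << 8) | (0x02ULL << 16);

	adev->df_funcs->pmc_start(adev, config, 1);	/* assign + enable */
	/* ... let link traffic accumulate ... */
	adev->df_funcs->pmc_get_count(adev, config, count);
	adev->df_funcs->pmc_stop(adev, config, 1);	/* reset + release */
}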
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.h b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h
index e79c58e5efcb..fcffd807764d 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.h
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h
@@ -35,6 +35,23 @@ enum DF_V3_6_MGCG {
DF_V3_6_MGCG_ENABLE_63_CYCLE_DELAY = 15
};
+/* Defined in global_features.h as FTI_PERFMON_VISIBLE */
+#define AMDGPU_DF_MAX_COUNTERS 4
+
+/* get flags from df perfmon config */
+#define GET_EVENT(x) (x & 0xFFUL)
+#define GET_INSTANCE(x) ((x >> 8) & 0xFFUL)
+#define GET_UNITMASK(x) ((x >> 16) & 0xFFUL)
+#define GET_INSTANCE_CONFIG(x) (0ULL | (0x07ULL) \
+ | ((0x046ULL + x) << 8) \
+ | (0x02 << 16))
+
+/* df event conf macros */
+#define IS_DF_XGMI_0_TX(x) (GET_EVENT(x) == 0x7 \
+ && GET_INSTANCE(x) == 0x46 && GET_UNITMASK(x) == 0x2)
+#define IS_DF_XGMI_1_TX(x) (GET_EVENT(x) == 0x7 \
+ && GET_INSTANCE(x) == 0x47 && GET_UNITMASK(x) == 0x2)
+
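/*
 * Standalone sanity check (not part of the patch): GET_INSTANCE_CONFIG()
 * round-trips through the IS_DF_XGMI_*_TX() predicates, using the macros
 * exactly as defined above.
 */
#include <assert.h>
#include <stdint.h>

#define GET_EVENT(x)		(x & 0xFFUL)
#define GET_INSTANCE(x)		((x >> 8) & 0xFFUL)
#define GET_UNITMASK(x)		((x >> 16) & 0xFFUL)
#define GET_INSTANCE_CONFIG(x)	(0ULL | (0x07ULL) \
				| ((0x046ULL + x) << 8) \
				| (0x02 << 16))

#define IS_DF_XGMI_0_TX(x)	(GET_EVENT(x) == 0x7 \
				&& GET_INSTANCE(x) == 0x46 && GET_UNITMASK(x) == 0x2)
#define IS_DF_XGMI_1_TX(x)	(GET_EVENT(x) == 0x7 \
				&& GET_INSTANCE(x) == 0x47 && GET_UNITMASK(x) == 0x2)

int main(void)
{
	assert(IS_DF_XGMI_0_TX(GET_INSTANCE_CONFIG(0)));
	assert(IS_DF_XGMI_1_TX(GET_INSTANCE_CONFIG(1)));
	return 0;
}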
extern const struct amdgpu_df_funcs df_v3_6_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index c0cb244f58cd..91f10995249b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -21,6 +21,8 @@
*
*/
#include <linux/firmware.h>
+#include <linux/module.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
@@ -1812,7 +1814,7 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index a59e0fdf5a97..003bb5769183 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -20,8 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
@@ -2080,7 +2082,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
@@ -4493,12 +4495,8 @@ static int gfx_v7_0_sw_init(void *handle)
static int gfx_v7_0_sw_fini(void *handle)
{
- int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
+ int i;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -5070,30 +5068,10 @@ static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
- adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
- adev->gds.gws.total_size = 64;
- adev->gds.oa.total_size = 16;
+ adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
+ adev->gds.gws_size = 64;
+ adev->gds.oa_size = 16;
adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
-
- if (adev->gds.mem.total_size == 64 * 1024) {
- adev->gds.mem.gfx_partition_size = 4096;
- adev->gds.mem.cs_partition_size = 4096;
-
- adev->gds.gws.gfx_partition_size = 4;
- adev->gds.gws.cs_partition_size = 4;
-
- adev->gds.oa.gfx_partition_size = 4;
- adev->gds.oa.cs_partition_size = 1;
- } else {
- adev->gds.mem.gfx_partition_size = 1024;
- adev->gds.mem.cs_partition_size = 1024;
-
- adev->gds.gws.gfx_partition_size = 16;
- adev->gds.gws.cs_partition_size = 16;
-
- adev->gds.oa.gfx_partition_size = 4;
- adev->gds.oa.cs_partition_size = 4;
- }
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 02955e6e9dd9..b7a2df46dc22 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -20,9 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "vi.h"
@@ -855,7 +859,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -2057,12 +2061,8 @@ static int gfx_v8_0_sw_init(void *handle)
static int gfx_v8_0_sw_fini(void *handle)
{
- int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
+ int i;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -7010,30 +7010,10 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
- adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
- adev->gds.gws.total_size = 64;
- adev->gds.oa.total_size = 16;
+ adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
+ adev->gds.gws_size = 64;
+ adev->gds.oa_size = 16;
adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
-
- if (adev->gds.mem.total_size == 64 * 1024) {
- adev->gds.mem.gfx_partition_size = 4096;
- adev->gds.mem.cs_partition_size = 4096;
-
- adev->gds.gws.gfx_partition_size = 4;
- adev->gds.gws.cs_partition_size = 4;
-
- adev->gds.oa.gfx_partition_size = 4;
- adev->gds.oa.cs_partition_size = 1;
- } else {
- adev->gds.mem.gfx_partition_size = 1024;
- adev->gds.mem.cs_partition_size = 1024;
-
- adev->gds.gws.gfx_partition_size = 16;
- adev->gds.gws.cs_partition_size = 16;
-
- adev->gds.oa.gfx_partition_size = 4;
- adev->gds.oa.cs_partition_size = 4;
- }
}
static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b610e3b30d95..b4b85e550bc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -20,9 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
@@ -35,6 +39,7 @@
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"
+#include "soc15.h"
#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"
@@ -309,12 +314,14 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- soc15_program_register_sequence(adev,
- golden_settings_gc_9_0,
- ARRAY_SIZE(golden_settings_gc_9_0));
- soc15_program_register_sequence(adev,
- golden_settings_gc_9_0_vg10,
- ARRAY_SIZE(golden_settings_gc_9_0_vg10));
+ if (!amdgpu_virt_support_skip_setting(adev)) {
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_0,
+ ARRAY_SIZE(golden_settings_gc_9_0));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_0_vg10,
+ ARRAY_SIZE(golden_settings_gc_9_0_vg10));
+ }
break;
case CHIP_VEGA12:
soc15_program_register_sequence(adev,
@@ -419,7 +426,7 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -1468,8 +1475,7 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
	/* GDS reserve memory: 64-byte alignment */
adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
- adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
- adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
+ adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size;
adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
@@ -1577,7 +1583,7 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
gfx_v9_0_write_data_to_reg(ring, 0, false,
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- (adev->gds.mem.total_size +
+ (adev->gds.gds_size +
adev->gfx.ngg.gds_reserve_size));
amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
@@ -1791,10 +1797,6 @@ static int gfx_v9_0_sw_fini(void *handle)
kfree(ras_if);
}
- amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
-
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
for (i = 0; i < adev->gfx.num_compute_rings; i++)
@@ -1806,9 +1808,7 @@ static int gfx_v9_0_sw_fini(void *handle)
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
+ amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
if (adev->asic_type == CHIP_RAVEN) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
@@ -1844,7 +1844,7 @@ static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
+ WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
}
static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
@@ -1912,8 +1912,8 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
soc15_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
- WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
}
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
@@ -1924,7 +1924,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v9_0_tiling_mode_table_init(adev);
@@ -1941,17 +1941,17 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
if (i == 0) {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
- WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
} else {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
(adev->gmc.private_aperture_start >> 48));
tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
(adev->gmc.shared_aperture_start >> 48));
- WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
}
}
soc15_grbm_select(adev, 0, 0, 0, 0);
@@ -1967,7 +1967,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
*/
gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
+ WREG32_SOC15_RLC(GC, 0, mmPA_SC_FIFO_SIZE,
(adev->gfx.config.sc_prim_fifo_size_frontend <<
PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
(adev->gfx.config.sc_prim_fifo_size_backend <<
@@ -2034,11 +2034,11 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
/* csib */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
adev->gfx.rlc.clear_state_gpu_addr >> 32);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
adev->gfx.rlc.clear_state_size);
}
@@ -2508,7 +2508,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].sched.ready = false;
}
- WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
udelay(50);
}
@@ -2706,9 +2706,9 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
int i;
if (enable) {
- WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
+ WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
} else {
- WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
+ WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
adev->gfx.compute_ring[i].sched.ready = false;
@@ -2769,9 +2769,9 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
tmp |= 0x80;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
@@ -2989,67 +2989,67 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
/* disable wptr polling */
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
mqd->cp_hqd_eop_base_addr_lo);
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
mqd->cp_hqd_eop_base_addr_hi);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
mqd->cp_hqd_eop_control);
/* enable doorbell? */
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
/* disable the queue if it's active */
if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
- WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
break;
udelay(1);
}
- WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
mqd->cp_hqd_dequeue_request);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
mqd->cp_hqd_pq_rptr);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
mqd->cp_hqd_pq_wptr_lo);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
mqd->cp_hqd_pq_wptr_hi);
}
/* set the pointer to the MQD */
- WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
+ WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
mqd->cp_mqd_base_addr_lo);
- WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
+ WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
mqd->cp_mqd_base_addr_hi);
/* set MQD vmid to 0 */
- WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
+ WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
mqd->cp_mqd_control);
/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
mqd->cp_hqd_pq_base_lo);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
mqd->cp_hqd_pq_base_hi);
/* set up the HQD, this is similar to CP_RB0_CNTL */
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
mqd->cp_hqd_pq_control);
/* set the wb address whether it's enabled or not */
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
mqd->cp_hqd_pq_rptr_report_addr_lo);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
mqd->cp_hqd_pq_rptr_report_addr_hi);
/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
mqd->cp_hqd_pq_wptr_poll_addr_lo);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
mqd->cp_hqd_pq_wptr_poll_addr_hi);
/* enable the doorbell if requested */
@@ -3060,23 +3060,23 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
(adev->doorbell_index.userqueue_end * 2) << 2);
}
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
mqd->cp_hqd_pq_wptr_lo);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
mqd->cp_hqd_pq_wptr_hi);
/* set the vmid for the queue */
- WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
- WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
mqd->cp_hqd_persistent_state);
/* activate the queue */
- WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
mqd->cp_hqd_active);
if (ring->use_doorbell)
@@ -3093,7 +3093,7 @@ static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
/* disable the queue if it's active */
if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
- WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
@@ -3105,21 +3105,21 @@ static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
DRM_DEBUG("KIQ dequeue request failed.\n");
/* Manual disable if dequeue request times out */
- WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
}
- WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
0);
}
- WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
- WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
- WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
return 0;
}
@@ -3539,6 +3539,241 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static const u32 vgpr_init_compute_shader[] =
+{
+ 0xb07c0000, 0xbe8000ff,
+ 0x000000f8, 0xbf110800,
+ 0x7e000280, 0x7e020280,
+ 0x7e040280, 0x7e060280,
+ 0x7e080280, 0x7e0a0280,
+ 0x7e0c0280, 0x7e0e0280,
+ 0x80808800, 0xbe803200,
+ 0xbf84fff5, 0xbf9c0000,
+ 0xd28c0001, 0x0001007f,
+ 0xd28d0001, 0x0002027e,
+ 0x10020288, 0xb8810904,
+ 0xb7814000, 0xd1196a01,
+ 0x00000301, 0xbe800087,
+ 0xbefc00c1, 0xd89c4000,
+ 0x00020201, 0xd89cc080,
+ 0x00040401, 0x320202ff,
+ 0x00000800, 0x80808100,
+ 0xbf84fff8, 0x7e020280,
+ 0xbf810000, 0x00000000,
+};
+
+static const u32 sgpr_init_compute_shader[] =
+{
+ 0xb07c0000, 0xbe8000ff,
+ 0x0000005f, 0xbee50080,
+ 0xbe812c65, 0xbe822c65,
+ 0xbe832c65, 0xbe842c65,
+ 0xbe852c65, 0xb77c0005,
+ 0x80808500, 0xbf84fff8,
+ 0xbe800080, 0xbf810000,
+};
+
+static const struct soc15_reg_entry vgpr_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
+};
+
+static const struct soc15_reg_entry sgpr_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+};
+
+static const struct soc15_reg_entry sec_ded_counter_registers[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED) },
+ { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO) },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2) },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2) },
+ { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT) },
+ { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2) },
+ { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT) },
+};
+
+static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
+ struct amdgpu_ib ib;
+ struct dma_fence *f = NULL;
+ int r, i, j;
+ unsigned total_size, vgpr_offset, sgpr_offset;
+ u64 gpu_addr;
+
+ /* only support when RAS is enabled */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return 0;
+
+ /* bail if the compute ring is not ready */
+ if (!ring->sched.ready)
+ return 0;
+
+ total_size =
+ ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+ total_size +=
+ ((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+ total_size = ALIGN(total_size, 256);
+ vgpr_offset = total_size;
+ total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
+ sgpr_offset = total_size;
+ total_size += sizeof(sgpr_init_compute_shader);
+
+ /* allocate an indirect buffer to put the commands in */
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ /* load the compute shaders */
+ for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
+ ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
+
+ for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
+ ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
+
+ /* init the ib length to 0 */
+ ib.length_dw = 0;
+
+ /* VGPR */
+ /* write the register state for the compute dispatch */
+ for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) {
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
+ - PACKET3_SET_SH_REG_START;
+ ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
+ }
+ /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
+ gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+ ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
+ - PACKET3_SET_SH_REG_START;
+ ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
+ ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+
+ /* write dispatch packet */
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+ ib.ptr[ib.length_dw++] = 128; /* x */
+ ib.ptr[ib.length_dw++] = 1; /* y */
+ ib.ptr[ib.length_dw++] = 1; /* z */
+ ib.ptr[ib.length_dw++] =
+ REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
+
+ /* write CS partial flush packet */
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+ ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+
+ /* SGPR */
+ /* write the register state for the compute dispatch */
+ for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) {
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i])
+ - PACKET3_SET_SH_REG_START;
+ ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value;
+ }
+ /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
+ gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+ ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
+ - PACKET3_SET_SH_REG_START;
+ ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
+ ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+
+ /* write dispatch packet */
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+ ib.ptr[ib.length_dw++] = 128; /* x */
+ ib.ptr[ib.length_dw++] = 1; /* y */
+ ib.ptr[ib.length_dw++] = 1; /* z */
+ ib.ptr[ib.length_dw++] =
+ REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
+
+ /* write CS partial flush packet */
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+ ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+
+	/* schedule the ib on the ring */
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ if (r) {
+ DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
+ goto fail;
+ }
+
+ /* wait for the GPU to finish processing the IB */
+ r = dma_fence_wait(f, false);
+ if (r) {
+ DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ goto fail;
+ }
+
+ /* read back registers to clear the counters */
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (j = 0; j < 16; j++) {
+ gfx_v9_0_select_se_sh(adev, 0x01, 0x0, j);
+ for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+ gfx_v9_0_select_se_sh(adev, 0x02, 0x0, j);
+ for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+ gfx_v9_0_select_se_sh(adev, 0x03, 0x0, j);
+ for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+ gfx_v9_0_select_se_sh(adev, 0x04, 0x0, j);
+ for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+ }
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+fail:
+ amdgpu_ib_free(adev, &ib, NULL);
+ dma_fence_put(f);
+
+ return r;
+}
+
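/*
 * Standalone sizing sketch (not part of the patch): where the IB size
 * arithmetic above comes from, in dwords -- each single-register
 * SET_SH_REG is 3 (header, offset, value), the COMPUTE_PGM_LO/HI write is
 * 4 (header, offset, lo, hi), DISPATCH_DIRECT is 5 and the
 * CS-partial-flush EVENT_WRITE is 2; "* 4" converts dwords to bytes.
 * Prints vgpr shader at 512, sgpr shader at 768, 816 bytes total.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int nregs = 10;		/* entries in vgpr/sgpr_init_regs */
	unsigned int vgpr_shader = 38 * 4;	/* bytes, from the arrays above */
	unsigned int sgpr_shader = 12 * 4;
	unsigned int total = 2 * ((nregs * 3) + 4 + 5 + 2) * 4;

	total = ALIGN_UP(total, 256);		/* shader code is 256-byte aligned */
	printf("vgpr shader at %u\n", total);
	total += ALIGN_UP(vgpr_shader, 256);
	printf("sgpr shader at %u, ib size %u\n", total, total + sgpr_shader);
	return 0;
}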
static int gfx_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3580,8 +3815,31 @@ static int gfx_v9_0_ecc_late_init(void *handle)
return 0;
}
- if (*ras_if)
+ /* requires IBs so do in late init after IB pool is initialized */
+ r = gfx_v9_0_do_edc_gpr_workarounds(adev);
+ if (r)
+ return r;
+
+ /* handle resume path. */
+ if (*ras_if) {
+ /* resend ras TA enable cmd during resume.
+ * prepare to handle failure.
+ */
+ ih_info.head = **ras_if;
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r) {
+ if (r == -EAGAIN) {
+ /* request a gpu reset. will run again. */
+ amdgpu_ras_request_reset_on_boot(adev,
+ AMDGPU_RAS_BLOCK__GFX);
+ return 0;
+ }
+			/* failed to enable ras, clean up everything. */
+ goto irq;
+ }
+		/* enabled successfully. continue. */
goto resume;
+ }
*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
if (!*ras_if)
@@ -3590,8 +3848,14 @@ static int gfx_v9_0_ecc_late_init(void *handle)
**ras_if = ras_block;
r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r)
+ if (r) {
+ if (r == -EAGAIN) {
+ amdgpu_ras_request_reset_on_boot(adev,
+ AMDGPU_RAS_BLOCK__GFX);
+ r = 0;
+ }
goto feature;
+ }
ih_info.head = **ras_if;
fs_info.head = **ras_if;
@@ -3624,7 +3888,7 @@ interrupt:
feature:
kfree(*ras_if);
*ras_if = NULL;
- return -EINVAL;
+ return r;
}
static int gfx_v9_0_late_init(void *handle)
@@ -4329,8 +4593,8 @@ static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
- WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
@@ -5066,13 +5330,13 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
- adev->gds.mem.total_size = 0x10000;
+ adev->gds.gds_size = 0x10000;
break;
case CHIP_RAVEN:
- adev->gds.mem.total_size = 0x1000;
+ adev->gds.gds_size = 0x1000;
break;
default:
- adev->gds.mem.total_size = 0x10000;
+ adev->gds.gds_size = 0x10000;
break;
}
@@ -5096,28 +5360,8 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
break;
}
- adev->gds.gws.total_size = 64;
- adev->gds.oa.total_size = 16;
-
- if (adev->gds.mem.total_size == 64 * 1024) {
- adev->gds.mem.gfx_partition_size = 4096;
- adev->gds.mem.cs_partition_size = 4096;
-
- adev->gds.gws.gfx_partition_size = 4;
- adev->gds.gws.cs_partition_size = 4;
-
- adev->gds.oa.gfx_partition_size = 4;
- adev->gds.oa.cs_partition_size = 1;
- } else {
- adev->gds.mem.gfx_partition_size = 1024;
- adev->gds.mem.cs_partition_size = 1024;
-
- adev->gds.gws.gfx_partition_size = 16;
- adev->gds.gws.cs_partition_size = 16;
-
- adev->gds.oa.gfx_partition_size = 4;
- adev->gds.oa.cs_partition_size = 4;
- }
+ adev->gds.gws_size = 64;
+ adev->gds.oa_size = 16;
}
static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 7bb5359d0bbd..9f0f189fc111 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -71,12 +71,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
/* Program the AGP BAR */
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_BASE, 0);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
/* Program the system aperture low logical page number. */
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
@@ -86,11 +86,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* workaround that increase system aperture high address (add 1)
* to get rid of the VM fault and hardware hang.
*/
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max((adev->gmc.fb_end >> 18) + 0x1,
adev->gmc.agp_end >> 18));
else
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
@@ -129,7 +129,7 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
- WREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}
static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
@@ -146,12 +146,12 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
- WREG32_SOC15(GC, 0, mmVM_L2_CNTL, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL, tmp);
tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL2);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
- WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL2, tmp);
tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
@@ -163,12 +163,12 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
}
- WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
- WREG32_SOC15(GC, 0, mmVM_L2_CNTL4, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL4, tmp);
}
static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
@@ -267,9 +267,9 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
* VF copy registers so vbios post doesn't program them, for
* SRIOV driver need to program them
*/
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE,
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_FB_LOCATION_BASE,
adev->gmc.vram_start >> 24);
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP,
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_FB_LOCATION_TOP,
adev->gmc.vram_end >> 24);
}
@@ -303,7 +303,7 @@ void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
MC_VM_MX_L1_TLB_CNTL,
ENABLE_ADVANCED_DRIVER_MODEL,
0);
- WREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
/* Setup L2 cache */
WREG32_FIELD15(GC, 0, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b06d876da2d9..ca8dbe91cc8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -20,8 +20,11 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 75aa3332aee2..57f80065d57a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -20,8 +20,11 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "cikd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 8a3b5e6fc6c9..9238280d1ff7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -20,8 +20,11 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
@@ -289,7 +292,7 @@ out:
*
* @adev: amdgpu_device pointer
*
- * Load the GDDR MC ucode into the hw (CIK).
+ * Load the GDDR MC ucode into the hw (VI).
* Returns 0 on success, error on failure.
*/
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
@@ -443,7 +446,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
* @adev: amdgpu_device pointer
*
* Set the location of vram, gart, and AGP in the GPU's
- * physical address space (CIK).
+ * physical address space (VI).
*/
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
@@ -515,7 +518,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
*
* Look up the amount of vram, vram width, and decide how to place
- * vram and gart within the GPU's physical address space (CIK).
+ * vram and gart within the GPU's physical address space (VI).
* Returns 0 for success.
*/
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
@@ -630,7 +633,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @vmid: vm instance to flush
*
- * Flush the TLB for the requested page table (CIK).
+ * Flush the TLB for the requested page table (VI).
*/
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
uint32_t vmid, uint32_t flush_type)
@@ -800,7 +803,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
* This sets up the TLBs, programs the page tables for VMID0,
* sets up the hw for VMIDs 1-15 which are allocated on
* demand, and sets up the global locations for the LDS, GDS,
- * and GPUVM for FSA64 clients (CIK).
+ * and GPUVM for FSA64 clients (VI).
* Returns 0 for success, errors for failure.
*/
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
@@ -948,7 +951,7 @@ static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
*
- * This disables all VM page table (CIK).
+ * This disables all VM page table (VI).
*/
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
@@ -978,7 +981,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
*
- * Print human readable fault information (CIK).
+ * Print human readable fault information (VI).
*/
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
u32 addr, u32 mc_client, unsigned pasid)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 72837b8c7031..8e3f5990e278 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -20,8 +20,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
#include <linux/firmware.h>
+#include <linux/pci.h>
+
#include <drm/drm_cache.h>
+
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
@@ -686,8 +690,25 @@ static int gmc_v9_0_ecc_late_init(void *handle)
return 0;
}
/* handle resume path. */
- if (*ras_if)
+ if (*ras_if) {
+		/* Resend the RAS TA enable command during resume and
+		 * be prepared to handle failure.
+		 */
+ ih_info.head = **ras_if;
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r) {
+ if (r == -EAGAIN) {
+				/* request a GPU reset; this will run again after the reset. */
+ amdgpu_ras_request_reset_on_boot(adev,
+ AMDGPU_RAS_BLOCK__UMC);
+ return 0;
+ }
+			/* failed to enable RAS; clean up everything. */
+ goto irq;
+ }
+		/* enabled successfully; continue. */
goto resume;
+ }
*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
if (!*ras_if)
@@ -696,8 +717,14 @@ static int gmc_v9_0_ecc_late_init(void *handle)
**ras_if = ras_block;
r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r)
+ if (r) {
+ if (r == -EAGAIN) {
+ amdgpu_ras_request_reset_on_boot(adev,
+ AMDGPU_RAS_BLOCK__UMC);
+ r = 0;
+ }
goto feature;
+ }
ih_info.head = **ras_if;
fs_info.head = **ras_if;
@@ -730,7 +757,7 @@ interrupt:
feature:
kfree(*ras_if);
*ras_if = NULL;
- return -EINVAL;
+ return r;
}
@@ -1099,6 +1126,9 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
+ if (amdgpu_virt_support_skip_setting(adev))
+ break;
+ /* fall through */
case CHIP_VEGA20:
soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
@@ -1163,6 +1193,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+
/* After HDP is initialized, flush HDP.*/
adev->nbio_funcs->hdp_flush(adev, NULL);
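
The RAS hunks in gmc_v9_0.c above (repeated in sdma_v4_0.c below) share one control-flow pattern: try to enable the RAS feature, treat -EAGAIN as "schedule a GPU reset and report success so late init can run again", and treat any other error as a hard failure that tears the state down. A minimal standalone sketch of that pattern, with stubbed helpers in place of the real amdgpu_ras_* calls:

#include <errno.h>
#include <stdio.h>

/* Stubbed stand-ins for the amdgpu_ras_* calls; the argument selects the
 * simulated outcome. Hypothetical names, not the driver's API. */
static int ras_feature_enable(int outcome) { return outcome; }
static void ras_request_reset(void) { puts("gpu reset requested, will run again"); }
static void ras_cleanup(void) { puts("tearing down RAS state"); }

/* Mirrors the hunk: -EAGAIN schedules a reset and reports success so the
 * late-init path runs again; any other error cleans up and propagates. */
static int ras_late_init(int outcome)
{
	int r = ras_feature_enable(outcome);

	if (r) {
		if (r == -EAGAIN) {
			ras_request_reset();
			return 0;
		}
		ras_cleanup();
		return r;
	}
	puts("RAS enabled, continuing");
	return 0;
}

int main(void)
{
	ras_late_init(0);		/* success path */
	ras_late_init(-EAGAIN);		/* reset-and-retry path */
	ras_late_init(-EINVAL);		/* hard-failure path */
	return 0;
}
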
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index b1626e1d2f5d..a13dd9a51149 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -20,7 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f2e6b148ccad..4b3faaccecb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -21,7 +21,6 @@
*
*/
-#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "cikd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_smc.c b/drivers/gpu/drm/amd/amdgpu/kv_smc.c
index b82e33c01571..2d9ab6b8be66 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_smc.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
#include "amdgpu.h"
#include "cikd.h"
#include "kv_dpm.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 41a9a5779623..05d1d448c8f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -111,6 +111,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+ if (amdgpu_virt_support_skip_setting(adev))
+ return;
+
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
@@ -156,6 +159,9 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ if (amdgpu_virt_support_skip_setting(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -202,6 +208,9 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
+ if (amdgpu_virt_support_skip_setting(adev))
+ return;
+
WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0XFFFFFFFF);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
@@ -338,11 +347,13 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
0);
WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
- /* Setup L2 cache */
- tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
- WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
+ if (!amdgpu_virt_support_skip_setting(adev)) {
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
+ }
}
/**
@@ -354,6 +365,10 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
+
+ if (amdgpu_virt_support_skip_setting(adev))
+ return;
+
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
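
The mmhub changes above all follow the same shape: each programming helper gains an early-out guard so an SR-IOV VF never reprograms registers the host driver already owns. A toy model of that guard; the predicate name is a stand-in for amdgpu_virt_support_skip_setting():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for amdgpu_virt_support_skip_setting(): true when the host
 * driver owns these registers, so the guest must leave them alone. */
static bool virt_skip_setting(bool is_sriov_vf)
{
	return is_sriov_vf;
}

/* Mirrors the early-return guards added to the mmhub helpers. */
static void init_cache_regs(bool is_sriov_vf)
{
	if (virt_skip_setting(is_sriov_vf))
		return;			/* host already set up the L2 cache */
	puts("programming VM_L2_CNTL*");
}

int main(void)
{
	init_cache_regs(false);		/* bare metal: program the registers */
	init_cache_regs(true);		/* SR-IOV VF: skip, host owns them */
	return 0;
}
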
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 2471e7cf75ea..31030f86be86 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -26,6 +26,7 @@
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
+#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
@@ -343,7 +344,7 @@ flr_done:
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
- && amdgpu_lockup_timeout == MAX_SCHEDULE_TIMEOUT)
+ && adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)
amdgpu_device_gpu_recover(adev, NULL);
}
@@ -448,6 +449,23 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
+static void xgpu_ai_init_reg_access_mode(struct amdgpu_device *adev)
+{
+ uint32_t rlc_fw_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
+ uint32_t sos_fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+
+ adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY;
+
+ if (rlc_fw_ver >= 0x5d)
+ adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_RLC;
+
+ if (sos_fw_ver >= 0x80455)
+ adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH;
+
+ if (sos_fw_ver >= 0x8045b)
+ adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_SKIP_SEETING;
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
@@ -456,4 +474,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.trans_msg = xgpu_ai_mailbox_trans_msg,
.get_pp_clk = xgpu_ai_get_pp_clk,
.force_dpm_level = xgpu_ai_force_dpm_level,
+ .init_reg_access_mode = xgpu_ai_init_reg_access_mode,
};
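
The new init_reg_access_mode callback derives a capability bitmask from firmware versions read out of scratch registers. A self-contained sketch of the same derivation; the flag values are illustrative, since the real AMDGPU_VIRT_REG_* definitions live in amdgpu_virt.h and are not part of this hunk:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values; not the amdgpu_virt.h definitions. */
#define REG_ACCESS_LEGACY	(1 << 0)
#define REG_ACCESS_RLC		(1 << 1)
#define REG_ACCESS_PSP_PRG_IH	(1 << 2)
#define REG_SKIP_SETTING	(1 << 3)

static uint32_t init_reg_access_mode(uint32_t rlc_fw_ver, uint32_t sos_fw_ver)
{
	uint32_t mode = REG_ACCESS_LEGACY;	/* always usable as a fallback */

	if (rlc_fw_ver >= 0x5d)
		mode |= REG_ACCESS_RLC;		/* RLC can proxy GC writes */
	if (sos_fw_ver >= 0x80455)
		mode |= REG_ACCESS_PSP_PRG_IH;	/* PSP can program IH regs */
	if (sos_fw_ver >= 0x8045b)
		mode |= REG_SKIP_SETTING;	/* host owns shared setup */
	return mode;
}

int main(void)
{
	printf("mode=0x%x\n", (unsigned)init_reg_access_mode(0x5d, 0x8045b));
	return 0;
}
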
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 1cdb98ad2db3..73419fa38159 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -29,9 +29,18 @@
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "vega10_enum.h"
+#include <uapi/linux/kfd_ioctl.h>
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
+static void nbio_v7_0_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -55,10 +64,9 @@ static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
@@ -283,4 +291,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
.ih_control = nbio_v7_0_ih_control,
.init_registers = nbio_v7_0_init_registers,
.detect_hw_virt = nbio_v7_0_detect_hw_virt,
+ .remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
};
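
The nbio change above has two halves: remap_hdp_registers() points the HDP flush registers at a fixed hole in the register BAR, and hdp_flush() then writes through that hole. Since the WREG32-style accessors take DWORD indices, the byte offset is shifted right by two. A small sketch of that index arithmetic; the offset constant is an illustrative stand-in for the kfd_ioctl.h value:

#include <stdint.h>
#include <stdio.h>

/* Illustrative offset into the remap hole; the real value comes from
 * KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL in kfd_ioctl.h. */
#define HDP_MEM_FLUSH_CNTL	0x0

/* WREG32-style accessors take DWORD indices, so the byte offset of the
 * remapped flush register is shifted right by two before use. */
static uint32_t remap_dword_index(uint32_t rmmio_remap_reg_offset)
{
	return (rmmio_remap_reg_offset + HDP_MEM_FLUSH_CNTL) >> 2;
}

int main(void)
{
	/* e.g. the SOC15 hole at 0x80000 minus one 4 KiB page */
	printf("dword index = 0x%x\n", (unsigned)remap_dword_index(0x7f000));
	return 0;
}
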
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index c69d51598cfe..bfaaa327ae3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -27,9 +27,18 @@
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
+#include <uapi/linux/kfd_ioctl.h>
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
+static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -53,10 +62,9 @@ static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
@@ -262,4 +270,5 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.ih_control = nbio_v7_4_ih_control,
.init_registers = nbio_v7_4_init_registers,
.detect_hw_virt = nbio_v7_4_detect_hw_virt,
+ .remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 2f79765b4bdb..7f8edc66ddff 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -94,6 +94,7 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */
GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */
GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */
+ GFX_CMD_ID_PROG_REG = 0x0000000B, /* program regs */
};
@@ -217,6 +218,12 @@ struct psp_gfx_cmd_save_restore_ip_fw
enum psp_gfx_fw_type fw_type; /* FW type */
};
+/* Command to setup register program */
+struct psp_gfx_cmd_reg_prog {
+ uint32_t reg_value;
+ uint32_t reg_id;
+};
+
/* All GFX ring buffer commands. */
union psp_gfx_commands
{
@@ -226,6 +233,7 @@ union psp_gfx_commands
struct psp_gfx_cmd_setup_tmr cmd_setup_tmr;
struct psp_gfx_cmd_load_ip_fw cmd_load_ip_fw;
struct psp_gfx_cmd_save_restore_ip_fw cmd_save_restore_ip_fw;
+ struct psp_gfx_cmd_reg_prog cmd_setup_reg_prog;
};
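
The new GFX_CMD_ID_PROG_REG command carries just a register id and a value, letting the driver ask the PSP to program a register it cannot touch directly. A hedged sketch of how such a command frame might be filled in; the simplified cmd_frame struct stands in for the full union psp_gfx_commands:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GFX_CMD_ID_PROG_REG 0x0000000B	/* from the enum above */

struct psp_gfx_cmd_reg_prog {
	uint32_t reg_value;
	uint32_t reg_id;
};

/* Simplified frame; the real union psp_gfx_commands has many members. */
struct cmd_frame {
	uint32_t cmd_id;
	struct psp_gfx_cmd_reg_prog prog;
};

static void build_prog_reg_cmd(struct cmd_frame *f, uint32_t id, uint32_t val)
{
	memset(f, 0, sizeof(*f));
	f->cmd_id = GFX_CMD_ID_PROG_REG;
	f->prog.reg_id = id;		/* which register PSP should program */
	f->prog.reg_value = val;	/* the value PSP should write */
}

int main(void)
{
	struct cmd_frame f;

	build_prog_reg_cmd(&f, 0 /* e.g. an IH_RB_CNTL id */, 0x1);
	printf("cmd=0x%x reg=%u val=0x%x\n", (unsigned)f.cmd_id,
	       (unsigned)f.prog.reg_id, (unsigned)f.prog.reg_value);
	return 0;
}
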
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 77c2bc344dfc..ce1ea31feee0 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -24,6 +24,9 @@
*/
#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index b91df7bd1d98..b1e7aca72578 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -21,6 +21,8 @@
*/
#include <linux/firmware.h>
+#include <linux/module.h>
+
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 143f0fae69d5..2ea772692037 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -24,7 +24,9 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
@@ -50,6 +52,10 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
+static bool psp_v3_1_support_vmr_ring(struct psp_context *psp);
+static int psp_v3_1_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type);
+
static int psp_v3_1_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -296,27 +302,57 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
psp_v3_1_reroute_ih(psp);
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ if (psp_v3_1_support_vmr_ring(psp)) {
+ ret = psp_v3_1_ring_stop(psp, ring_type);
+ if (ret) {
+			DRM_ERROR("psp_v3_1_ring_stop failed!\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
+		/* No ring size initialization for SR-IOV */
+ /* Write the ring initialization command to C2PMSG_101 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg);
+
+		/* there might be a hardware handshake issue which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
+ mmMP0_SMN_C2PMSG_101), 0x80000000,
+ 0x8000FFFF, false);
+ } else {
+
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+		/* there might be a hardware handshake issue which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
+ mmMP0_SMN_C2PMSG_64), 0x80000000,
+ 0x8000FFFF, false);
+ }
return ret;
}
@@ -327,16 +363,31 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- /* Write the ring destroy command to C2PMSG_64 */
- psp_ring_reg = 3 << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ if (psp_v3_1_support_vmr_ring(psp)) {
+ /* Write the Destroy GPCOM ring command to C2PMSG_101 */
+ psp_ring_reg = GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg);
+
+		/* there might be a handshake issue which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ } else {
+ /* Write the ring destroy command to C2PMSG_64 */
+ psp_ring_reg = 3 << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+		/* there might be a handshake issue which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ }
return ret;
}
@@ -375,7 +426,10 @@ static int psp_v3_1_cmd_submit(struct psp_context *psp,
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ if (psp_v3_1_support_vmr_ring(psp))
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update KM RB frame pointer to new frame */
/* write_frame ptr increments by size of rb_frame in bytes */
@@ -404,7 +458,13 @@ static int psp_v3_1_cmd_submit(struct psp_context *psp,
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+ if (psp_v3_1_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
+		/* send interrupt to PSP for SR-IOV ring write pointer update */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
return 0;
}
@@ -574,6 +634,14 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
return 0;
}
+static bool psp_v3_1_support_vmr_ring(struct psp_context *psp)
+{
+ if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version >= 0x80455)
+ return true;
+
+ return false;
+}
+
static const struct psp_funcs psp_v3_1_funcs = {
.init_microcode = psp_v3_1_init_microcode,
.bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
@@ -586,6 +654,7 @@ static const struct psp_funcs psp_v3_1_funcs = {
.compare_sram_data = psp_v3_1_compare_sram_data,
.smu_reload_quirk = psp_v3_1_smu_reload_quirk,
.mode1_reset = psp_v3_1_mode1_reset,
+ .support_vmr_ring = psp_v3_1_support_vmr_ring,
};
void psp_v3_1_set_psp_funcs(struct psp_context *psp)
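
Everything in the psp_v3_1 rework above branches on one predicate: the VMR ring (C2PMSG_101/102/103) is used only under SR-IOV with SOS firmware 0x80455 or newer, while everything else stays on the legacy C2PMSG_64/67/69-71 path. The predicate itself is simple enough to model directly:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as psp_v3_1_support_vmr_ring(): the VMR ring is used only
 * under SR-IOV with a new enough SOS firmware. */
static bool support_vmr_ring(bool sriov_vf, uint32_t sos_fw_version)
{
	return sriov_vf && sos_fw_version >= 0x80455;
}

int main(void)
{
	/* bare metal and old-firmware VFs keep the legacy C2PMSG_64 path */
	printf("%d %d %d\n",
	       support_vmr_ring(false, 0x80455),
	       support_vmr_ring(true, 0x80454),
	       support_vmr_ring(true, 0x80455));
	return 0;
}
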
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 36196372e8db..a10175838013 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -21,8 +21,11 @@
*
* Authors: Alex Deucher
*/
+
+#include <linux/delay.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
@@ -574,7 +577,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 6d39544e7829..5f4e2c616241 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -21,8 +21,11 @@
*
* Authors: Alex Deucher
*/
+
+#include <linux/delay.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
@@ -846,7 +849,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 9c88ce513d78..bc3087599523 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -21,8 +21,11 @@
*
*/
+#include <linux/delay.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
@@ -210,12 +213,14 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- soc15_program_register_sequence(adev,
- golden_settings_sdma_4,
- ARRAY_SIZE(golden_settings_sdma_4));
- soc15_program_register_sequence(adev,
- golden_settings_sdma_vg10,
- ARRAY_SIZE(golden_settings_sdma_vg10));
+ if (!amdgpu_virt_support_skip_setting(adev)) {
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_4,
+ ARRAY_SIZE(golden_settings_sdma_4));
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_vg10,
+ ARRAY_SIZE(golden_settings_sdma_vg10));
+ }
break;
case CHIP_VEGA12:
soc15_program_register_sequence(adev,
@@ -1207,7 +1212,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -1521,8 +1526,25 @@ static int sdma_v4_0_late_init(void *handle)
}
/* handle resume path. */
- if (*ras_if)
+ if (*ras_if) {
+		/* Resend the RAS TA enable command during resume and
+		 * be prepared to handle failure.
+		 */
+ ih_info.head = **ras_if;
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r) {
+ if (r == -EAGAIN) {
+				/* request a GPU reset; this will run again after the reset. */
+ amdgpu_ras_request_reset_on_boot(adev,
+ AMDGPU_RAS_BLOCK__SDMA);
+ return 0;
+ }
+			/* failed to enable RAS; clean up everything. */
+ goto irq;
+ }
+		/* enabled successfully; continue. */
goto resume;
+ }
*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
if (!*ras_if)
@@ -1531,8 +1553,14 @@ static int sdma_v4_0_late_init(void *handle)
**ras_if = ras_block;
r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r)
+ if (r) {
+ if (r == -EAGAIN) {
+ amdgpu_ras_request_reset_on_boot(adev,
+ AMDGPU_RAS_BLOCK__SDMA);
+ r = 0;
+ }
goto feature;
+ }
ih_info.head = **ras_if;
fs_info.head = **ras_if;
@@ -1571,7 +1599,7 @@ interrupt:
feature:
kfree(*ras_if);
*ras_if = NULL;
- return -EINVAL;
+ return r;
}
static int sdma_v4_0_sw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 9d8df68893b9..5e1a2528df7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -24,7 +24,8 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -1375,6 +1376,18 @@ static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
+static uint64_t si_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+ uint64_t nak_r, nak_g;
+
+ /* Get the number of NAKs received and generated */
+ nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
+ nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
+
+	/* Add the total number of NAKs, i.e. the number of replays */
+ return (nak_r + nak_g);
+}
+
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
@@ -1393,6 +1406,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.need_full_reset = &si_need_full_reset,
.get_pcie_usage = &si_get_pcie_usage,
.need_reset_on_init = &si_need_reset_on_init,
+ .get_pcie_replay_count = &si_get_pcie_replay_count,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
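
si_get_pcie_replay_count() above (and its soc15 and vi twins later in this series) approximates the PCIe replay count as the sum of NAKs received and NAKs generated, since either kind of NAK forces one side of the link to replay TLPs. A standalone sketch with the RREG32_PCIE() reads replaced by fixed stub values:

#include <stdint.h>
#include <stdio.h>

/* Fixed stub values standing in for the RREG32_PCIE() reads. */
static uint64_t read_nak_received(void)  { return 3; }
static uint64_t read_nak_generated(void) { return 2; }

/* Replay count = NAKs received + NAKs generated, since each NAK forces
 * one side of the link to replay TLPs. */
static uint64_t get_pcie_replay_count(void)
{
	return read_nak_received() + read_nak_generated();
}

int main(void)
{
	printf("replays: %llu\n",
	       (unsigned long long)get_pcie_replay_count());
	return 0;
}
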
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 3eeefd40dae0..bdda8b4e03f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -21,7 +21,7 @@
*
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si.h"
@@ -230,7 +230,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index d57e75e5c71f..4cb4c891120b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -21,7 +21,9 @@
*
*/
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 8c50c9cab455..57bb5f9e08b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -20,7 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
index 4a2fd8b61940..8f994ffa9cd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_smc.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "sid.h"
#include "ppsmc.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index b7e594c2bfb4..b769995c3029 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -23,7 +23,8 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -44,6 +45,7 @@
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
+#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"
@@ -64,6 +66,9 @@
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
+#include <uapi/linux/kfd_ioctl.h>
#define mmMP0_MISC_CGTT_CTRL0 0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
@@ -230,7 +235,7 @@ void soc15_grbm_select(struct amdgpu_device *adev,
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
+ WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
@@ -385,7 +390,15 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
tmp &= ~(entry->and_mask);
tmp |= entry->or_mask;
}
- WREG32(reg, tmp);
+
+ if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
+ reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
+ reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
+ reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
+ WREG32_RLC(reg, tmp);
+ else
+ WREG32(reg, tmp);
+
}
}
@@ -475,6 +488,13 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
soc15_asic_get_baco_capability(adev, &baco_reset);
else
baco_reset = false;
+ if (baco_reset) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (hive || (ras && ras->supported))
+ baco_reset = false;
+ }
break;
default:
baco_reset = false;
@@ -606,12 +626,24 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VEGA20:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
- if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+
+		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ if (adev->asic_type == CHIP_VEGA20)
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ }
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ if (adev->asic_type == CHIP_VEGA20)
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ }
}
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
@@ -733,7 +765,8 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
/* Just return false for soc15 GPUs. Reset does not seem to
* be necessary.
*/
- return false;
+ if (!amdgpu_passthrough(adev))
+ return false;
if (adev->flags & AMD_IS_APU)
return false;
@@ -748,6 +781,18 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+ uint64_t nak_r, nak_g;
+
+ /* Get the number of NAKs received and generated */
+ nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
+ nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
+
+	/* Add the total number of NAKs, i.e. the number of replays */
+ return (nak_r + nak_g);
+}
+
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
@@ -765,6 +810,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.init_doorbell_index = &vega10_doorbell_index_init,
.get_pcie_usage = &soc15_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
+ .get_pcie_replay_count = &soc15_get_pcie_replay_count,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
@@ -784,12 +830,16 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.init_doorbell_index = &vega20_doorbell_index_init,
.get_pcie_usage = &soc15_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
+ .get_pcie_replay_count = &soc15_get_pcie_replay_count,
};
static int soc15_common_early_init(void *handle)
{
+#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &soc15_pcie_rreg;
@@ -998,11 +1048,17 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
int i;
struct amdgpu_ring *ring;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
- adev->nbio_funcs->sdma_doorbell_range(adev, i,
- ring->use_doorbell, ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
+	/* Two reasons to skip:
+	 * 1) the host driver has already programmed these registers;
+	 * 2) to avoid register programming violations in SR-IOV.
+	 */
+ if (!amdgpu_virt_support_skip_setting(adev)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio_funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+ }
}
adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
@@ -1019,6 +1075,12 @@ static int soc15_common_hw_init(void *handle)
soc15_program_aspm(adev);
/* setup nbio registers */
adev->nbio_funcs->init_registers(adev);
+	/* Remap HDP registers to a hole in the MMIO space so that
+	 * they can be exposed to process space.
+	 */
+ if (adev->nbio_funcs->remap_hdp_registers)
+ adev->nbio_funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
/* HW doorbell routing policy: doorbell writing not
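
The early-init hunk above anchors the remap window at MMIO_REG_HOLE_OFFSET, one page below 0x80000 in the register BAR, so the same page-aligned hole can later be handed to user space. A compilable model of that computation; the BAR address in main() is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096u
#define MMIO_REG_HOLE_OFFSET	(0x80000u - PAGE_SIZE)

struct rmmio_remap {
	uint32_t reg_offset;	/* offset of the hole within the BAR */
	uint64_t bus_addr;	/* bus address of the hole */
};

/* Mirrors soc15_common_early_init(): the hole sits one page below 0x80000
 * in the register BAR, so the bus address is just base plus that offset. */
static void init_remap(struct rmmio_remap *r, uint64_t rmmio_base)
{
	r->reg_offset = MMIO_REG_HOLE_OFFSET;
	r->bus_addr = rmmio_base + MMIO_REG_HOLE_OFFSET;
}

int main(void)
{
	struct rmmio_remap r;

	init_remap(&r, 0xfce00000ull);	/* hypothetical BAR address */
	printf("offset=0x%x bus=0x%llx\n", (unsigned)r.reg_offset,
	       (unsigned long long)r.bus_addr);
	return 0;
}
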
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index a66c8bfbbaa6..06f39f5bbf76 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -42,8 +42,18 @@ struct soc15_reg_golden {
u32 or_mask;
};
+struct soc15_reg_entry {
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+ uint32_t reg_value;
+};
+
#define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg
+#define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset)
+
#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
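
The new soc15_reg_entry plus SOC15_REG_ENTRY_OFFSET() let a static table of registers be resolved to absolute offsets through adev->reg_offset[hwip][inst][seg] at runtime. A toy version of the lookup, with a small made-up table standing in for adev:

#include <stdint.h>
#include <stdio.h>

/* Same fields as the new struct soc15_reg_entry. */
struct reg_entry {
	uint32_t hwip, inst, seg, reg_offset, reg_value;
};

/* Small static table standing in for adev->reg_offset[hwip][inst][seg];
 * the base values are made up. */
static const uint32_t reg_base[2][1][2] = {
	{ { 0x0000, 0x2000 } },		/* HWIP 0 */
	{ { 0x8000, 0xa000 } },		/* HWIP 1 */
};

/* Equivalent of SOC15_REG_ENTRY_OFFSET(): segment base plus offset. */
static uint32_t entry_offset(const struct reg_entry *e)
{
	return reg_base[e->hwip][e->inst][e->seg] + e->reg_offset;
}

int main(void)
{
	struct reg_entry e = { .hwip = 1, .inst = 0, .seg = 1,
			       .reg_offset = 0x42, .reg_value = 0 };

	printf("absolute offset = 0x%x\n", (unsigned)entry_offset(&e));
	return 0;
}
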
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 49c262540940..47f74dab365d 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -69,26 +69,60 @@
} \
} while (0)
-#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
- ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
- UVD_DPG_LMA_CTL__MASK_EN_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
- << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
- (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
- RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); })
+#define WREG32_RLC(reg, value) \
+ do { \
+ if (amdgpu_virt_support_rlc_prg_reg(adev)) { \
+ uint32_t i = 0; \
+ uint32_t retries = 50000; \
+ uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
+ uint32_t r1 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1; \
+ uint32_t spare_int = adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT; \
+ WREG32(r0, value); \
+ WREG32(r1, (reg | 0x80000000)); \
+ WREG32(spare_int, 0x1); \
+ for (i = 0; i < retries; i++) { \
+ u32 tmp = RREG32(r1); \
+ if (!(tmp & 0x80000000)) \
+ break; \
+ udelay(10); \
+ } \
+ if (i >= retries) \
+			pr_err("timeout: rlcg program of reg 0x%05x failed!\n", reg); \
+ } else { \
+ WREG32(reg, value); \
+ } \
+ } while (0)
-#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \
+#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
do { \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
- WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
- UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
- << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
- (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
+ if (amdgpu_virt_support_rlc_prg_reg(adev)) { \
+ uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
+ uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
+ uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
+ uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; \
+ if (target_reg == grbm_cntl) \
+ WREG32(r2, value); \
+ else if (target_reg == grbm_idx) \
+ WREG32(r3, value); \
+ WREG32(target_reg, value); \
+ } else { \
+ WREG32(target_reg, value); \
+ } \
} while (0)
-#endif
+#define WREG32_SOC15_RLC(ip, inst, reg, value) \
+ do { \
+ uint32_t target_reg = adev->reg_offset[GC_HWIP][0][reg##_BASE_IDX] + reg;\
+ WREG32_RLC(target_reg, value); \
+ } while (0)
+
+#define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \
+ WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
+ (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
+ & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+#define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
+ WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+#endif
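
WREG32_RLC() above implements a scratch-register handshake: the value goes into SCRATCH_REG0, the target offset with bit 31 set goes into SCRATCH_REG1, RLC_SPARE_INT kicks the RLC, and the macro polls until the RLC clears bit 31 or the retry budget runs out. A simulation of that handshake with a toy register file; on real hardware the RLC services the request asynchronously:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for SCRATCH_REG0/1 and RLC_SPARE_INT. */
static uint32_t scratch0, scratch1, spare_int;

/* Simulated RLC: consumes the request and clears the busy bit (bit 31). */
static void rlc_service(void)
{
	scratch1 &= ~0x80000000u;
}

static bool wreg32_rlc(uint32_t reg, uint32_t value)
{
	int i;

	scratch0 = value;		/* payload */
	scratch1 = reg | 0x80000000u;	/* target offset plus busy flag */
	spare_int = 1;			/* kick the RLC */

	for (i = 0; i < 50000; i++) {
		rlc_service();
		if (!(scratch1 & 0x80000000u))
			return true;	/* RLC acknowledged the write */
	}
	return false;			/* timeout, as in the pr_err() path */
}

int main(void)
{
	printf("ok=%d\n", wreg32_rlc(0x1234, 0xdead));
	return 0;
}
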
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index a20b711a6756..e40140bf6699 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -20,7 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index c4fb58667fd4..82abd8e728ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
@@ -491,7 +491,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32(mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -741,6 +741,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr,
.set_wptr = uvd_v4_2_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 52bd8a654734..01e62fb8e6e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -22,8 +22,9 @@
* Authors: Christian König <christian.koenig@amd.com>
*/
+#include <linux/delay.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
@@ -506,7 +507,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32(mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -849,6 +850,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr,
.set_wptr = uvd_v5_0_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index be70e6e5f9df..670784a78512 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
@@ -186,7 +186,7 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
for (i = 0; i < adev->usec_timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -960,7 +960,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32(mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -1505,6 +1505,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
@@ -1530,6 +1531,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
@@ -1558,6 +1560,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
.align_mask = 0x3f,
.nop = HEVC_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.get_rptr = uvd_v6_0_enc_ring_get_rptr,
.get_wptr = uvd_v6_0_enc_ring_get_wptr,
.set_wptr = uvd_v6_0_enc_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index fc4f0bb9a2e7..a6bfe7651d07 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -22,7 +22,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "soc15.h"
@@ -194,7 +194,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
for (i = 0; i < adev->usec_timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -1230,7 +1230,7 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i >= adev->usec_timeout)
@@ -1762,6 +1762,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.vmhub = AMDGPU_MMHUB,
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
@@ -1794,6 +1795,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
.align_mask = 0x3f,
.nop = HEVC_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.vmhub = AMDGPU_MMHUB,
.get_rptr = uvd_v7_0_enc_ring_get_rptr,
.get_wptr = uvd_v7_0_enc_ring_get_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 40363ca6c5f1..b6837fcfdba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -26,7 +26,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
@@ -605,6 +605,7 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.align_mask = 0xf,
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.get_rptr = vce_v2_0_ring_get_rptr,
.get_wptr = vce_v2_0_ring_get_wptr,
.set_wptr = vce_v2_0_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6ec65cf11112..475ae68f38f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -26,7 +26,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
@@ -894,6 +894,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.align_mask = 0xf,
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
@@ -917,6 +918,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
.align_mask = 0xf,
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index c0ec27991c22..eafbe8d8248d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -25,7 +25,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "soc15.h"
@@ -1069,6 +1069,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.align_mask = 0x3f,
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.vmhub = AMDGPU_MMHUB,
.get_rptr = vce_v4_0_ring_get_rptr,
.get_wptr = vce_v4_0_ring_get_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 3dbc51f9d3b9..d30ff256ff57 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -22,7 +22,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
@@ -49,6 +49,8 @@ static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
+static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
+ struct dpg_pause_state *new_state);
/**
* vcn_v1_0_early_init - set function pointers
@@ -140,7 +142,9 @@ static int vcn_v1_0_sw_init(void *handle)
if (r)
return r;
- return r;
+ adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
+
+ return 0;
}
/**
@@ -1204,6 +1208,132 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)
return r;
}
+static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
+ struct dpg_pause_state *new_state)
+{
+ int ret_code;
+ uint32_t reg_data = 0;
+ uint32_t reg_data2 = 0;
+ struct amdgpu_ring *ring;
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
+ adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ new_state->fw_based, new_state->jpeg);
+
+ reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+
+ if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* pause DPG non-jpeg */
+ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+
+ /* Restore */
+ ring = &adev->vcn.ring_enc[0];
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.ring_enc[1];
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg non-jpeg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.pause_state.fw_based = new_state->fw_based;
+ }
+
+ /* pause/unpause if state is changed */
+ if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
+ DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
+ adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ new_state->fw_based, new_state->jpeg);
+
+ reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
+ (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
+
+ if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
+ ret_code = 0;
+
+ if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+
+ if (!ret_code) {
+ /* Make sure JPRG Snoop is disabled before sending the pause */
+ reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
+ reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
+
+ /* pause DPG jpeg */
+ reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
+ UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
+
+ /* Restore */
+ ring = &adev->vcn.ring_jpeg;
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
+ UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
+ UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
+ WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
+ UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+
+ ring = &adev->vcn.ring_dec;
+ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ }
+ } else {
+ /* unpause dpg jpeg, no need to wait */
+ reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
+ WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+ }
+ adev->vcn.pause_state.jpeg = new_state->jpeg;
+ }
+
+ return 0;
+}
+
static bool vcn_v1_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -2054,6 +2184,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.vmhub = AMDGPU_MMHUB,
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
.get_wptr = vcn_v1_0_dec_ring_get_wptr,
@@ -2087,6 +2218,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.vmhub = AMDGPU_MMHUB,
.get_rptr = vcn_v1_0_enc_ring_get_rptr,
.get_wptr = vcn_v1_0_enc_ring_get_wptr,
@@ -2118,6 +2250,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
.align_mask = 0xf,
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
+ .no_user_fence = true,
.vmhub = AMDGPU_MMHUB,
.extra_dw = 64,
.get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
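
vcn_v1_0_pause_dpg_mode() above is edge-triggered: it compares the requested pause state against the cached one for each engine (non-JPEG and JPEG), touches the hardware only when they differ, and caches the new state afterwards. A stripped-down model of that state machine, with printouts in place of the register sequences:

#include <stdio.h>

enum dpg_state { DPG_UNPAUSE, DPG_PAUSE };

struct pause_state {
	enum dpg_state fw_based;	/* non-JPEG engine */
	enum dpg_state jpeg;		/* JPEG engine */
};

/* Edge-triggered like vcn_v1_0_pause_dpg_mode(): only touch the hardware
 * when the requested state differs from the cached one. */
static void pause_dpg_mode(struct pause_state *cur,
			   const struct pause_state *req)
{
	if (cur->fw_based != req->fw_based) {
		printf("%s non-JPEG DPG\n",
		       req->fw_based == DPG_PAUSE ? "pause" : "unpause");
		cur->fw_based = req->fw_based;
	}
	if (cur->jpeg != req->jpeg) {
		printf("%s JPEG DPG\n",
		       req->jpeg == DPG_PAUSE ? "pause" : "unpause");
		cur->jpeg = req->jpeg;
	}
}

int main(void)
{
	struct pause_state cur = { DPG_UNPAUSE, DPG_UNPAUSE };
	struct pause_state req = { DPG_PAUSE, DPG_UNPAUSE };

	pause_dpg_mode(&cur, &req);	/* pauses the non-JPEG engine */
	pause_dpg_mode(&cur, &req);	/* no-op: state unchanged */
	return 0;
}
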
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 8d89ab7f0ae8..22260e6963b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -20,7 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "soc15.h"
@@ -48,14 +50,29 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
adev->irq.ih.enabled = true;
if (adev->irq.ih1.ring_size) {
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
adev->irq.ih1.enabled = true;
}
@@ -63,7 +80,15 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
RB_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
adev->irq.ih2.enabled = true;
}
}
@@ -81,7 +106,15 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
+
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
@@ -92,7 +125,15 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_ENABLE, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
@@ -104,7 +145,16 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
RB_ENABLE, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
+
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
@@ -187,7 +237,15 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
!!adev->irq.msi_enabled);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+
+ if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
/* set the writeback address whether it's enabled or not */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
@@ -214,7 +272,15 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WPTR_OVERFLOW_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
RB_FULL_DRAIN_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
@@ -232,7 +298,16 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+
+ if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
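
Every IH_RB_CNTL write in vega10_ih.c now goes through the same fallback pattern: when the VF must not program the ring-control register itself, the write is proxied through psp_reg_program() (failing with -ETIMEDOUT in the init path), otherwise it is a plain MMIO write. A sketch of that dispatch with stubbed back ends:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stubbed back ends for psp_reg_program() and the direct MMIO write. */
static int psp_reg_program_stub(int reg_id, uint32_t val)
{
	printf("PSP programs reg %d = 0x%x\n", reg_id, (unsigned)val);
	return 0;			/* nonzero would mean failure */
}

static void mmio_write_stub(int reg_id, uint32_t val)
{
	printf("MMIO write reg %d = 0x%x\n", reg_id, (unsigned)val);
}

/* Same shape as the repeated hunks: prefer the PSP proxy when the VF may
 * not touch IH_RB_CNTL directly, otherwise write it through MMIO. */
static int write_ih_rb_cntl(bool psp_prg_ih, int reg_id, uint32_t val)
{
	if (psp_prg_ih)
		return psp_reg_program_stub(reg_id, val) ? -1 : 0;
	mmio_write_stub(reg_id, val);
	return 0;
}

int main(void)
{
	write_ih_rb_cntl(true, 0, 0x1);		/* SR-IOV with PSP support */
	write_ih_rb_cntl(false, 0, 0x1);	/* bare metal */
	return 0;
}
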
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 5e5b42a0744a..d40ed1a828dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -20,8 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
+#include <linux/pci.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -987,6 +989,18 @@ static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
+static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+ uint64_t nak_r, nak_g;
+
+ /* Get the number of NAKs received and generated */
+ nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
+ nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
+
+ /* Add the total number of NAKs, i.e. the number of replays */
+ return (nak_r + nak_g);
+}
+
static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
u32 clock_cntl, pc;
@@ -1021,6 +1035,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.init_doorbell_index = &legacy_doorbell_index_init,
.get_pcie_usage = &vi_get_pcie_usage,
.need_reset_on_init = &vi_need_reset_on_init,
+ .get_pcie_replay_count = &vi_get_pcie_replay_count,
};
#define CZ_REV_BRISTOL(rev) \
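vi_get_pcie_replay_count() implements the same .get_pcie_replay_count hook the other ASIC families gained in this series; the replay count is simply the sum of NAKs received and NAKs generated. A hedged sketch of how a caller reaches it through the asic_funcs table (the surrounding function is an assumption; such counters are typically surfaced through a sysfs attribute):

    /* Sketch: read the PCIe replay count through the asic hook. */
    static ssize_t show_pcie_replay_count(struct amdgpu_device *adev,
    				      char *buf)
    {
    	uint64_t cnt = adev->asic_funcs->get_pcie_replay_count(adev);

    	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
    }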
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 3621efbd5759..e413d4a71fa3 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -21,7 +21,7 @@
*/
static const uint32_t cwsr_trap_gfx8_hex[] = {
- 0xbf820001, 0xbf82012b,
+ 0xbf820001, 0xbf820121,
0xb8f4f802, 0x89748674,
0xb8f5f803, 0x8675ff75,
0x00000400, 0xbf850017,
@@ -36,12 +36,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x8671ff71, 0x0000ffff,
0x8f728374, 0xb972e0c2,
0xbf800002, 0xb9740002,
- 0xbe801f70, 0xb8f5f803,
- 0x8675ff75, 0x00000100,
- 0xbf840006, 0xbefa0080,
- 0xb97a0203, 0x8671ff71,
- 0x0000ffff, 0x80f08870,
- 0x82f18071, 0xbefa0080,
+ 0xbe801f70, 0xbefa0080,
0xb97a0283, 0xbef60068,
0xbef70069, 0xb8fa1c07,
0x8e7a9c7a, 0x87717a71,
@@ -279,15 +274,17 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf82015d,
+ 0xbf820001, 0xbf82015e,
0xb8f8f802, 0x89788678,
- 0xb8f1f803, 0x866eff71,
- 0x00000400, 0xbf850037,
- 0x866eff71, 0x00000800,
- 0xbf850003, 0x866eff71,
- 0x00000100, 0xbf840008,
+ 0xb8fbf803, 0x866eff7b,
+ 0x00000400, 0xbf85003b,
+ 0x866eff7b, 0x00000800,
+ 0xbf850003, 0x866eff7b,
+ 0x00000100, 0xbf84000c,
0x866eff78, 0x00002000,
- 0xbf840001, 0xbf810000,
+ 0xbf840005, 0xbf8e0010,
+ 0xb8eef803, 0x866eff6e,
+ 0x00000400, 0xbf84fffb,
0x8778ff78, 0x00002000,
0x80ec886c, 0x82ed806d,
0xb8eef807, 0x866fff6e,
@@ -295,13 +292,13 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x8977ff77, 0xfc000000,
0x87776f77, 0x896eff6e,
0x001f8000, 0xb96ef807,
- 0xb8f0f812, 0xb8f1f813,
- 0x8ef08870, 0xc0071bb8,
+ 0xb8faf812, 0xb8fbf813,
+ 0x8efa887a, 0xc0071bbd,
0x00000000, 0xbf8cc07f,
- 0xc0071c38, 0x00000008,
+ 0xc0071ebd, 0x00000008,
0xbf8cc07f, 0x86ee6e6e,
0xbf840001, 0xbe801d6e,
- 0xb8f1f803, 0x8671ff71,
+ 0xb8fbf803, 0x867bff7b,
0x000001ff, 0xbf850002,
0x806c846c, 0x826d806d,
0x866dff6d, 0x0000ffff,
@@ -311,258 +308,256 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x8f6e8378, 0xb96ee0c2,
0xbf800002, 0xb9780002,
0xbe801f6c, 0x866dff6d,
- 0x0000ffff, 0xbef00080,
- 0xb9700283, 0xb8f02407,
- 0x8e709c70, 0x876d706d,
- 0xb8f003c7, 0x8e709b70,
- 0x876d706d, 0xb8f0f807,
- 0x8670ff70, 0x00007fff,
- 0xb970f807, 0xbeee007e,
+ 0x0000ffff, 0xbefa0080,
+ 0xb97a0283, 0xb8fa2407,
+ 0x8e7a9b7a, 0x876d7a6d,
+ 0xb8fa03c7, 0x8e7a9a7a,
+ 0x876d7a6d, 0xb8faf807,
+ 0x867aff7a, 0x00007fff,
+ 0xb97af807, 0xbeee007e,
0xbeef007f, 0xbefe0180,
- 0xbf900004, 0x87708478,
- 0xb970f802, 0xbf8e0002,
- 0xbf88fffe, 0xb8f02a05,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2a05,
+ 0x807a817a, 0x8e7a8a7a,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b867b, 0x807a7b7a,
+ 0x807a7e7a, 0x827b807f,
+ 0x867bff7b, 0x0000ffff,
+ 0xc04b1c3d, 0x00000050,
+ 0xbf8cc07f, 0xc04b1d3d,
+ 0x00000060, 0xbf8cc07f,
+ 0xc0431e7d, 0x00000074,
+ 0xbf8cc07f, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x867aff7f,
+ 0x08000000, 0x8f7a837a,
+ 0x87777a77, 0x867aff7f,
+ 0x70000000, 0x8f7a817a,
+ 0x87777a77, 0xbef1007c,
+ 0xbef00080, 0xb8f02a05,
0x80708170, 0x8e708a70,
- 0xb8f11605, 0x80718171,
- 0x8e718671, 0x80707170,
- 0x80707e70, 0x8271807f,
- 0x8671ff71, 0x0000ffff,
- 0xc0471cb8, 0x00000040,
- 0xbf8cc07f, 0xc04b1d38,
- 0x00000048, 0xbf8cc07f,
- 0xc0431e78, 0x00000058,
- 0xbf8cc07f, 0xc0471eb8,
- 0x0000005c, 0xbf8cc07f,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x8670ff7f, 0x08000000,
- 0x8f708370, 0x87777077,
- 0x8670ff7f, 0x70000000,
- 0x8f708170, 0x87777077,
- 0xbefb007c, 0xbefa0080,
- 0xb8fa2a05, 0x807a817a,
- 0x8e7a8a7a, 0xb8f01605,
- 0x80708170, 0x8e708670,
- 0x807a707a, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xbefe007c, 0xbefc007a,
- 0xc0611efa, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611b3a,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
- 0x807a847a, 0xbefc007e,
- 0xbefe007c, 0xbefc007a,
- 0xc0611b7a, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611b3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611bba,
+ 0xbefc0070, 0xc0611b7a,
0x0000007c, 0xbf8cc07f,
- 0x807a847a, 0xbefc007e,
- 0xbefe007c, 0xbefc007a,
- 0xc0611bfa, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611bba, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611e3a,
+ 0xbefc0070, 0xc0611bfa,
0x0000007c, 0xbf8cc07f,
- 0x807a847a, 0xbefc007e,
- 0xb8f1f803, 0xbefe007c,
- 0xbefc007a, 0xc0611c7a,
- 0x0000007c, 0xbf8cc07f,
- 0x807a847a, 0xbefc007e,
- 0xbefe007c, 0xbefc007a,
- 0xc0611a3a, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611a7a,
- 0x0000007c, 0xbf8cc07f,
- 0x807a847a, 0xbefc007e,
- 0xb8fbf801, 0xbefe007c,
- 0xbefc007a, 0xc0611efa,
+ 0xbefc0070, 0xc0611a3a,
0x0000007c, 0xbf8cc07f,
- 0x807a847a, 0xbefc007e,
- 0x8670ff7f, 0x04000000,
- 0xbeef0080, 0x876f6f70,
- 0xb8fa2a05, 0x807a817a,
- 0x8e7a8a7a, 0xb8f11605,
- 0x80718171, 0x8e718471,
- 0x8e768271, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747a74, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a717c,
- 0xbf85ffe7, 0xbef40172,
- 0xbefa0080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0xe0724000,
- 0x7a1d0000, 0xe0724100,
- 0x7a1d0100, 0xe0724200,
- 0x7a1d0200, 0xe0724300,
- 0x7a1d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8f14306,
- 0x8671c171, 0xbf84002c,
- 0xbf8a0000, 0x8670ff6f,
- 0x04000000, 0xbf840028,
- 0x8e718671, 0x8e718271,
- 0xbef60071, 0xb8fa2a05,
- 0x807a817a, 0x8e7a8a7a,
- 0xb8f01605, 0x80708170,
- 0x8e708670, 0x807a707a,
- 0x807aff7a, 0x00000080,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0x867aff7f,
+ 0x04000000, 0xbeef0080,
+ 0x876f6f7a, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
0xbef600ff, 0x01000000,
- 0xbefc0080, 0xd28c0002,
- 0x000100c1, 0xd28d0003,
- 0x000204c1, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x7a1d0002, 0x68040702,
- 0xd0c9006a, 0x0000e302,
- 0xbf87fff7, 0xbef70000,
- 0xbefa00ff, 0x00000400,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
0xbefe00c1, 0xbeff00c1,
- 0xb8f12a05, 0x80718171,
- 0x8e718271, 0x8e768871,
+ 0xbee80080, 0xbee90080,
0xbef600ff, 0x01000000,
- 0xbefc0084, 0xbf0a717c,
- 0xbf840015, 0xbf11017c,
- 0x8071ff71, 0x00001000,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0xe0724000, 0x7a1d0000,
- 0xe0724100, 0x7a1d0100,
- 0xe0724200, 0x7a1d0200,
- 0xe0724300, 0x7a1d0300,
- 0x807c847c, 0x807aff7a,
- 0x00000400, 0xbf0a717c,
- 0xbf85ffef, 0xbf9c0000,
- 0xbf8200dc, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x866eff7f,
- 0x08000000, 0x8f6e836e,
- 0x87776e77, 0x866eff7f,
- 0x70000000, 0x8f6e816e,
- 0x87776e77, 0x866eff7f,
- 0x04000000, 0xbf84001e,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8ef4306, 0x866fc16f,
- 0xbf840019, 0x8e6f866f,
- 0x8e6f826f, 0xbef6006f,
- 0xb8f82a05, 0x80788178,
- 0x8e788a78, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x8078ff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf84002c, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840028, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0x8070ff70,
0x00000080, 0xbef600ff,
0x01000000, 0xbefc0080,
- 0xe0510000, 0x781d0000,
- 0xe0510100, 0x781d0000,
- 0x807cff7c, 0x00000200,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7c, 0xbf85fff6,
- 0xbef80080, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef2a05,
- 0x806f816f, 0x8e6f826f,
- 0x8e76886f, 0xbef600ff,
- 0x01000000, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0x806fff6f, 0x00008000,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
+ 0xd28c0002, 0x000100c1,
+ 0xd28d0003, 0x000204c1,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0x8e76887b, 0xbef600ff,
+ 0x01000000, 0xbefc0084,
+ 0xbf0a7b7c, 0xbf840015,
+ 0xbf11017c, 0x807bff7b,
+ 0x00001000, 0x7e000300,
0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xbf9c0000, 0xe0524000,
- 0x6e1d0000, 0xe0524100,
- 0x6e1d0100, 0xe0524200,
- 0x6e1d0200, 0xe0524300,
- 0x6e1d0300, 0xb8f82a05,
+ 0x7e060303, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffef,
+ 0xbf9c0000, 0xbf8200da,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x08000000,
+ 0x8f6e836e, 0x87776e77,
+ 0x866eff7f, 0x70000000,
+ 0x8f6e816e, 0x87776e77,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001e, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf840019,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82a05,
0x80788178, 0x8e788a78,
0xb8ee1605, 0x806e816e,
0x8e6e866e, 0x80786e78,
- 0x80f8c078, 0xb8ef1605,
- 0x806f816f, 0x8e6f846f,
- 0x8e76826f, 0xbef600ff,
- 0x01000000, 0xbefc006f,
- 0xc031003a, 0x00000078,
- 0x80f8c078, 0xbf8cc07f,
- 0x80fc907c, 0xbf800000,
- 0xbe802d00, 0xbe822d02,
- 0xbe842d04, 0xbe862d06,
- 0xbe882d08, 0xbe8a2d0a,
- 0xbe8c2d0c, 0xbe8e2d0e,
- 0xbf06807c, 0xbf84fff0,
+ 0x8078ff78, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xe0510000,
+ 0x781d0000, 0xe0510100,
+ 0x781d0000, 0x807cff7c,
+ 0x00000200, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85fff6, 0xbef80080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8ef2a05, 0x806f816f,
+ 0x8e6f826f, 0x8e76886f,
+ 0xbef600ff, 0x01000000,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0x806fff6f,
+ 0x00008000, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xbf9c0000,
+ 0xe0524000, 0x6e1d0000,
+ 0xe0524100, 0x6e1d0100,
+ 0xe0524200, 0x6e1d0200,
+ 0xe0524300, 0x6e1d0300,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
+ 0x80786e78, 0x80f8c078,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f846f, 0x8e76826f,
0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbefc006f, 0xc031003a,
+ 0x00000078, 0x80f8c078,
+ 0xbf8cc07f, 0x80fc907c,
+ 0xbf800000, 0xbe802d00,
+ 0xbe822d02, 0xbe842d04,
+ 0xbe862d06, 0xbe882d08,
+ 0xbe8a2d0a, 0xbe8c2d0c,
+ 0xbe8e2d0e, 0xbf06807c,
+ 0xbf84fff0, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe007a, 0xbeff007b,
- 0x866f71ff, 0x000003ff,
- 0xb96f4803, 0x866f71ff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2a05, 0x806e816e,
- 0x8e6e8a6e, 0xb8ef1605,
- 0x806f816f, 0x8e6f866f,
- 0x806e6f6e, 0x806e746e,
- 0x826f8075, 0x866fff6f,
- 0x0000ffff, 0xc0071cb7,
- 0x00000040, 0xc00b1d37,
- 0x00000048, 0xc0031e77,
- 0x00000058, 0xc0071eb7,
- 0x0000005c, 0xbf8cc07f,
- 0x866fff6d, 0xf0000000,
- 0x8f6f9c6f, 0x8e6f906f,
- 0xbeee0080, 0x876e6f6e,
- 0x866fff6d, 0x08000000,
- 0x8f6f9b6f, 0x8e6f8f6f,
- 0x876e6f6e, 0x866fff70,
- 0x00800000, 0x8f6f976f,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e8370,
- 0xb96ee0c2, 0xbf800002,
- 0xb9700002, 0xbf8a0000,
- 0x95806f6c, 0xbf810000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f866f, 0x806e6f6e,
+ 0x806e746e, 0x826f8075,
+ 0x866fff6f, 0x0000ffff,
+ 0xc00b1c37, 0x00000050,
+ 0xc00b1d37, 0x00000060,
+ 0xc0031e77, 0x00000074,
+ 0xbf8cc07f, 0x866fff6d,
+ 0xf8000000, 0x8f6f9b6f,
+ 0x8e6f906f, 0xbeee0080,
+ 0x876e6f6e, 0x866fff6d,
+ 0x04000000, 0x8f6f9a6f,
+ 0x8e6f8f6f, 0x876e6f6e,
+ 0x866fff7a, 0x00800000,
+ 0x8f6f976f, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0x95806f6c,
+ 0xbf810000, 0x00000000,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
index abe1a5da29fb..a47f5b933120 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -282,19 +282,6 @@ if G8SR_DEBUG_TIMESTAMP
s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
end
- //check whether there is mem_viol
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
- s_cbranch_scc0 L_NO_PC_REWIND
-
- //if so, need rewind PC assuming GDS operation gets NACKed
- s_mov_b32 s_save_tmp, 0 //clear mem_viol bit
- s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit
- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
- s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8
- s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0 // -scc
-
-L_NO_PC_REWIND:
s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 0bb9c577b3a2..6bae2e022c6e 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -150,10 +150,10 @@ var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-var S_SAVE_PC_HI_RCNT_SHIFT = 28 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used
-var S_SAVE_PC_HI_RCNT_MASK = 0xF0000000 //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 27 //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME
+var S_SAVE_PC_HI_RCNT_SHIFT = 27 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used
+var S_SAVE_PC_HI_RCNT_MASK = 0xF8000000 //FIXME
+var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 26 //FIXME
+var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x04000000 //FIXME
var s_save_spi_init_lo = exec_lo
var s_save_spi_init_hi = exec_hi
@@ -162,8 +162,8 @@ var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],tra
var s_save_pc_hi = ttmp1
var s_save_exec_lo = ttmp2
var s_save_exec_hi = ttmp3
-var s_save_tmp = ttmp4
-var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine
+var s_save_tmp = ttmp14
+var s_save_trapsts = ttmp15 //not really used until the end of the SAVE routine
var s_save_xnack_mask_lo = ttmp6
var s_save_xnack_mask_hi = ttmp7
var s_save_buf_rsrc0 = ttmp8
@@ -171,9 +171,9 @@ var s_save_buf_rsrc1 = ttmp9
var s_save_buf_rsrc2 = ttmp10
var s_save_buf_rsrc3 = ttmp11
var s_save_status = ttmp12
-var s_save_mem_offset = ttmp14
+var s_save_mem_offset = ttmp4
var s_save_alloc_size = s_save_trapsts //conflict
-var s_save_m0 = ttmp15
+var s_save_m0 = ttmp5
var s_save_ttmps_lo = s_save_tmp //no conflict
var s_save_ttmps_hi = s_save_trapsts //no conflict
@@ -207,10 +207,10 @@ var s_restore_mode = ttmp7
var s_restore_pc_lo = ttmp0
var s_restore_pc_hi = ttmp1
-var s_restore_exec_lo = ttmp14
-var s_restore_exec_hi = ttmp15
-var s_restore_status = ttmp4
-var s_restore_trapsts = ttmp5
+var s_restore_exec_lo = ttmp4
+var s_restore_exec_hi = ttmp5
+var s_restore_status = ttmp14
+var s_restore_trapsts = ttmp15
var s_restore_xnack_mask_lo = xnack_mask_lo
var s_restore_xnack_mask_hi = xnack_mask_hi
var s_restore_buf_rsrc0 = ttmp8
@@ -266,10 +266,16 @@ if (!EMU_RUN_HACK)
L_HALT_WAVE:
// If STATUS.HALT is set then this fault must come from SQC instruction fetch.
- // We cannot prevent further faults so just terminate the wavefront.
+ // We cannot prevent further faults. Spin-wait until the context is saved.
s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
s_cbranch_scc0 L_NOT_ALREADY_HALTED
- s_endpgm
+
+L_WAIT_CTX_SAVE:
+ s_sleep 0x10
+ s_getreg_b32 ttmp2, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_TRAPSTS_SAVECTX_MASK
+ s_cbranch_scc0 L_WAIT_CTX_SAVE
+
L_NOT_ALREADY_HALTED:
s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
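The new L_WAIT_CTX_SAVE loop replaces the old s_endpgm: an already-halted wave is no longer terminated, it sleeps and re-reads TRAPSTS until the SAVECTX bit is raised. An illustrative C analogue of that loop (read_trapsts()/wave_sleep() are hypothetical stand-ins for s_getreg_b32/s_sleep, and the mask value is assumed):

    /* C analogue of L_WAIT_CTX_SAVE above; illustration only. */
    extern unsigned int read_trapsts(void);      /* s_getreg_b32 */
    extern void wave_sleep(unsigned int cycles); /* s_sleep */

    #define SQ_WAVE_TRAPSTS_SAVECTX_MASK 0x400   /* assumed value */

    static void wait_for_ctx_save(void)
    {
    	do {
    		wave_sleep(0x10);
    	} while (!(read_trapsts() & SQ_WAVE_TRAPSTS_SAVECTX_MASK));
    }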
@@ -293,12 +299,12 @@ L_FETCH_2ND_TRAP:
// Read second-level TBA/TMA from first-level TMA and jump if available.
// ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
// ttmp12 holds SQ_WAVE_STATUS
- s_getreg_b32 ttmp4, hwreg(HW_REG_SQ_SHADER_TMA_LO)
- s_getreg_b32 ttmp5, hwreg(HW_REG_SQ_SHADER_TMA_HI)
- s_lshl_b64 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8
- s_load_dwordx2 [ttmp2, ttmp3], [ttmp4, ttmp5], 0x0 glc:1 // second-level TBA
+ s_getreg_b32 ttmp14, hwreg(HW_REG_SQ_SHADER_TMA_LO)
+ s_getreg_b32 ttmp15, hwreg(HW_REG_SQ_SHADER_TMA_HI)
+ s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+ s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
s_waitcnt lgkmcnt(0)
- s_load_dwordx2 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 glc:1 // second-level TMA
+ s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA
s_waitcnt lgkmcnt(0)
s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler has not been set
@@ -405,7 +411,7 @@ end
else
end
- // Save trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
+ // Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
// ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
get_vgpr_size_bytes(s_save_ttmps_lo)
get_sgpr_size_bytes(s_save_ttmps_hi)
@@ -413,13 +419,11 @@ end
s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
s_addc_u32 s_save_ttmps_hi, s_save_spi_init_hi, 0x0
s_and_b32 s_save_ttmps_hi, s_save_ttmps_hi, 0xFFFF
- s_store_dwordx2 [ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x40 glc:1
- ack_sqc_store_workaround()
- s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x48 glc:1
+ s_store_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x50 glc:1
ack_sqc_store_workaround()
- s_store_dword ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x58 glc:1
+ s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x60 glc:1
ack_sqc_store_workaround()
- s_store_dwordx2 [ttmp14, ttmp15], [s_save_ttmps_lo, s_save_ttmps_hi], 0x5C glc:1
+ s_store_dword ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x74 glc:1
ack_sqc_store_workaround()
/* setup Resource Constants */
@@ -1093,7 +1097,7 @@ end
//s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
- // Restore trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
+ // Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
// ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
get_vgpr_size_bytes(s_restore_ttmps_lo)
get_sgpr_size_bytes(s_restore_ttmps_hi)
@@ -1101,10 +1105,9 @@ end
s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
s_and_b32 s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
- s_load_dwordx2 [ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x40 glc:1
- s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x48 glc:1
- s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x58 glc:1
- s_load_dwordx2 [ttmp14, ttmp15], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x5C glc:1
+ s_load_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x50 glc:1
+ s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x60 glc:1
+ s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 glc:1
s_waitcnt lgkmcnt(0)
//reuse s_restore_m0 as a temp register
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 083bd8114db1..f91126f5f1be 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -213,6 +213,8 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
q_properties->type = KFD_QUEUE_TYPE_SDMA;
+ else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
+ q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
else
return -ENOTSUPP;
@@ -522,7 +524,7 @@ static int kfd_ioctl_set_trap_handler(struct file *filep,
struct kfd_process_device *pdd;
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
mutex_lock(&p->mutex);
@@ -1272,6 +1274,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
if (args->size != kfd_doorbell_process_slice(dev))
return -EINVAL;
offset = kfd_get_process_doorbells(dev, p);
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
+ if (args->size != PAGE_SIZE)
+ return -EINVAL;
+ offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+ if (!offset)
+ return -ENOMEM;
}
mutex_lock(&p->mutex);
@@ -1301,6 +1309,14 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
args->mmap_offset = offset;
+ /* MMIO is mapped through the kfd device.
+ * Generate a kfd mmap offset.
+ */
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
+ args->mmap_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(args->gpu_id);
+ args->mmap_offset <<= PAGE_SHIFT;
+ }
+
return 0;
err_free:
@@ -1551,6 +1567,32 @@ copy_from_user_failed:
return err;
}
+static int kfd_ioctl_alloc_queue_gws(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ int retval;
+ struct kfd_ioctl_alloc_queue_gws_args *args = data;
+ struct kfd_dev *dev;
+
+ if (!hws_gws_support)
+ return -ENODEV;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev) {
+ pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
+ return -ENODEV;
+ }
+ if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
+ return -ENODEV;
+
+ mutex_lock(&p->mutex);
+ retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
+ mutex_unlock(&p->mutex);
+
+ args->first_gws = 0;
+ return retval;
+}
+
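kfd_ioctl_alloc_queue_gws() lets a user queue attach to the device-global GWS block allocated at device init (see the kgd2kfd_device_init hunk below). A hypothetical userspace sketch, assuming the args struct and ioctl number exported by the uapi header include/uapi/linux/kfd_ioctl.h:

    /* Hypothetical userspace sketch for the new GWS ioctl. */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>	/* assumed to carry the uapi */

    static int attach_gws(int kfd_fd, unsigned int gpu_id,
    		      unsigned int queue_id)
    {
    	struct kfd_ioctl_alloc_queue_gws_args args = {
    		.gpu_id   = gpu_id,	/* from the topology */
    		.queue_id = queue_id,	/* an existing user queue */
    		.num_gws  = 1,		/* nonzero attaches, 0 detaches */
    	};

    	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_QUEUE_GWS, &args)) {
    		perror("AMDKFD_IOC_ALLOC_QUEUE_GWS");
    		return -1;
    	}
    	/* args.first_gws comes back as 0 in this implementation */
    	return 0;
    }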
static int kfd_ioctl_get_dmabuf_info(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1753,6 +1795,8 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
kfd_ioctl_import_dmabuf, 0),
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
+ kfd_ioctl_alloc_queue_gws, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
@@ -1845,6 +1889,39 @@ err_i1:
return retcode;
}
+static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ struct vm_area_struct *vma)
+{
+ phys_addr_t address;
+ int ret;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+
+ vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ pr_debug("Process %d mapping mmio page\n"
+ " target user address == 0x%08llX\n"
+ " physical address == 0x%08llX\n"
+ " vm_flags == 0x%04lX\n"
+ " size == 0x%04lX\n",
+ process->pasid, (unsigned long long) vma->vm_start,
+ address, vma->vm_flags, PAGE_SIZE);
+
+ ret = io_remap_pfn_range(vma,
+ vma->vm_start,
+ address >> PAGE_SHIFT,
+ PAGE_SIZE,
+ vma->vm_page_prot);
+ return ret;
+}
+
+
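Paired with the KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP branch added to kfd_ioctl_alloc_memory_of_gpu() above, kfd_mmio_mmap() lets a process map the one-page MMIO remap region. A hypothetical userspace flow (uapi names per the kernel side; PAGE_SIZE assumed 4096, the usual access flags elided):

    /* Hypothetical sketch: allocate the MMIO remap page, then
     * mmap it through /dev/kfd at the returned mmap_offset.
     */
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kfd_ioctl.h>	/* assumed to carry the uapi */

    static void *map_mmio_page(int kfd_fd, unsigned int gpu_id)
    {
    	struct kfd_ioctl_alloc_memory_of_gpu_args args = {
    		.gpu_id = gpu_id,
    		.size   = 4096,	/* must equal PAGE_SIZE */
    		.flags  = KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP,
    	};

    	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &args))
    		return NULL;

    	/* mmap_offset encodes KFD_MMAP_TYPE_MMIO | gpu id */
    	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
    		    kfd_fd, args.mmap_offset);
    }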
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct kfd_process *process;
@@ -1875,6 +1952,10 @@ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
if (!dev)
return -ENODEV;
return kfd_reserved_mem_mmap(dev, process, vma);
+ case KFD_MMAP_TYPE_MMIO:
+ if (!dev)
+ return -ENODEV;
+ return kfd_mmio_mmap(dev, process, vma);
}
return -EFAULT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 2e7c44955f43..59f8ca4297db 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -134,6 +134,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
#define polaris12_cache_info carrizo_cache_info
+#define vegam_cache_info carrizo_cache_info
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
@@ -372,7 +373,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
props->weight = 20;
else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
- props->weight = 15;
+ props->weight = 15 * iolink->num_hops_xgmi;
else
props->weight = node_distance(id_from, id_to);
@@ -652,6 +653,10 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = polaris12_cache_info;
num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
break;
+ case CHIP_VEGAM:
+ pcache_info = vegam_cache_info;
+ num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
+ break;
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
@@ -1092,6 +1097,7 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
struct kfd_dev *kdev,
+ struct kfd_dev *peer_kdev,
struct crat_subtype_iolink *sub_type_hdr,
uint32_t proximity_domain_from,
uint32_t proximity_domain_to)
@@ -1110,6 +1116,8 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->proximity_domain_from = proximity_domain_from;
sub_type_hdr->proximity_domain_to = proximity_domain_to;
+ sub_type_hdr->num_hops_xgmi =
+ amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
return 0;
}
@@ -1287,7 +1295,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
(char *)sub_type_hdr +
sizeof(struct crat_subtype_iolink));
ret = kfd_fill_gpu_xgmi_link_to_gpu(
- &avail_size, kdev,
+ &avail_size, kdev, peer_dev->gpu,
(struct crat_subtype_iolink *)sub_type_hdr,
proximity_domain, nid);
if (ret < 0)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
index 7c3f192fe25f..d54ceebd346b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -274,7 +274,8 @@ struct crat_subtype_iolink {
uint32_t minimum_bandwidth_mbs;
uint32_t maximum_bandwidth_mbs;
uint32_t recommended_transfer_size;
- uint8_t reserved2[CRAT_IOLINK_RESERVED_LENGTH];
+ uint8_t reserved2[CRAT_IOLINK_RESERVED_LENGTH - 1];
+ uint8_t num_hops_xgmi;
};
/*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 765b58a17dc7..9d1b026e29e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -54,6 +54,7 @@ static const struct kfd_device_info kaveri_device_info = {
.needs_iommu_device = true,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -71,6 +72,7 @@ static const struct kfd_device_info carrizo_device_info = {
.needs_iommu_device = true,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -87,6 +89,7 @@ static const struct kfd_device_info raven_device_info = {
.needs_iommu_device = true,
.needs_pci_atomics = true,
.num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
#endif
@@ -105,6 +108,7 @@ static const struct kfd_device_info hawaii_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -121,6 +125,7 @@ static const struct kfd_device_info tonga_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -137,6 +142,7 @@ static const struct kfd_device_info fiji_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -153,6 +159,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -170,6 +177,7 @@ static const struct kfd_device_info polaris10_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -186,6 +194,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -202,6 +211,7 @@ static const struct kfd_device_info polaris11_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -218,6 +228,24 @@ static const struct kfd_device_info polaris12_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+};
+
+static const struct kfd_device_info vegam_device_info = {
+ .asic_family = CHIP_VEGAM,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 4,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -234,6 +262,7 @@ static const struct kfd_device_info vega10_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -250,6 +279,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -266,6 +296,7 @@ static const struct kfd_device_info vega12_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
@@ -282,6 +313,7 @@ static const struct kfd_device_info vega20_device_info = {
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
};
@@ -373,6 +405,9 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x6995, &polaris12_device_info }, /* Polaris12 */
{ 0x6997, &polaris12_device_info }, /* Polaris12 */
{ 0x699F, &polaris12_device_info }, /* Polaris12 */
+ { 0x694C, &vegam_device_info }, /* VegaM */
+ { 0x694E, &vegam_device_info }, /* VegaM */
+ { 0x694F, &vegam_device_info }, /* VegaM */
{ 0x6860, &vega10_device_info }, /* Vega10 */
{ 0x6861, &vega10_device_info }, /* Vega10 */
{ 0x6862, &vega10_device_info }, /* Vega10 */
@@ -518,6 +553,13 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
} else
kfd->max_proc_per_quantum = hws_max_conc_proc;
+ /* Allocate global GWS that is shared by all KFD processes */
+ if (hws_gws_support && amdgpu_amdkfd_alloc_gws(kfd->kgd,
+ amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws)) {
+ dev_err(kfd_device, "Could not allocate %d gws\n",
+ amdgpu_amdkfd_get_num_gws(kfd->kgd));
+ goto out;
+ }
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
@@ -541,7 +583,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
false)) {
dev_err(kfd_device, "Could not allocate %d bytes\n", size);
- goto out;
+ goto alloc_gtt_mem_failure;
}
dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
@@ -611,6 +653,9 @@ kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+alloc_gtt_mem_failure:
+ if (hws_gws_support)
+ amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
@@ -628,6 +673,8 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_doorbell_fini(kfd);
kfd_gtt_sa_fini(kfd);
amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ if (hws_gws_support)
+ amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
}
kfree(kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ae381450601c..ece35c7a77b5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -60,14 +60,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
- unsigned int sdma_queue_id);
+ struct queue *q);
static void kfd_process_hw_exception(struct work_struct *work);
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
- if (type == KFD_QUEUE_TYPE_SDMA)
+ if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
return KFD_MQD_TYPE_SDMA;
return KFD_MQD_TYPE_CP;
}
@@ -107,12 +107,23 @@ static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
return dqm->dev->device_info->num_sdma_engines;
}
+static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
+{
+ return dqm->dev->device_info->num_xgmi_sdma_engines;
+}
+
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
return dqm->dev->device_info->num_sdma_engines
* dqm->dev->device_info->num_sdma_queues_per_engine;
}
+unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
+{
+ return dqm->dev->device_info->num_xgmi_sdma_engines
+ * dqm->dev->device_info->num_sdma_queues_per_engine;
+}
+
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -133,7 +144,8 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
* preserve the user mode ABI.
*/
q->doorbell_id = q->properties.queue_id;
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
+ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
/* For SDMA queues on SOC15 with 8-byte doorbell, use static
* doorbell assignments based on the engine and queue id.
* The doorbell index distance between RLC (2*i) and (2*i+1)
@@ -174,7 +186,8 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
struct kfd_dev *dev = qpd->dqm->dev;
if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
- q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
return;
old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
@@ -289,7 +302,8 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
retval = create_compute_queue_nocpsch(dqm, q, qpd);
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
retval = create_sdma_queue_nocpsch(dqm, q, qpd);
else
retval = -EINVAL;
@@ -307,6 +321,8 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
dqm->sdma_queue_count++;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
+ dqm->xgmi_sdma_queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
@@ -368,9 +384,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr;
int retval;
- mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (!mqd_mgr)
- return -ENOMEM;
+ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];
retval = allocate_hqd(dqm, q);
if (retval)
@@ -425,16 +439,17 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
int retval;
struct mqd_manager *mqd_mgr;
- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
- get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd_mgr)
- return -ENOMEM;
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
deallocate_hqd(dqm, q);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
dqm->sdma_queue_count--;
- deallocate_sdma_queue(dqm, q->sdma_id);
+ deallocate_sdma_queue(dqm, q);
+ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
+ dqm->xgmi_sdma_queue_count--;
+ deallocate_sdma_queue(dqm, q);
} else {
pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
@@ -501,12 +516,8 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
retval = -ENODEV;
goto out_unlock;
}
- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
- get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd_mgr) {
- retval = -ENOMEM;
- goto out_unlock;
- }
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
/*
* Eviction state logic: we only mark active queues as evicted
* to avoid the overhead of restoring inactive queues later
@@ -529,7 +540,8 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
}
} else if (prev_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
- q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+ q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -556,7 +568,8 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
- q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+ q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
if (WARN(q->process->mm != current->mm,
"should only run in user thread"))
retval = -EFAULT;
@@ -571,27 +584,6 @@ out_unlock:
return retval;
}
-static struct mqd_manager *get_mqd_manager(
- struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
-{
- struct mqd_manager *mqd_mgr;
-
- if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
- return NULL;
-
- pr_debug("mqd type %d\n", type);
-
- mqd_mgr = dqm->mqd_mgrs[type];
- if (!mqd_mgr) {
- mqd_mgr = mqd_manager_init(type, dqm->dev);
- if (!mqd_mgr)
- pr_err("mqd manager is NULL");
- dqm->mqd_mgrs[type] = mqd_mgr;
- }
-
- return mqd_mgr;
-}
-
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -612,13 +604,8 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_active)
continue;
- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
- get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd_mgr) { /* should not be here */
- pr_err("Cannot evict queue, mqd mgr is NULL\n");
- retval = -ENOMEM;
- goto out;
- }
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
q->properties.is_evicted = true;
q->properties.is_active = false;
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
@@ -717,13 +704,8 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_evicted)
continue;
- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
- get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd_mgr) { /* should not be here */
- pr_err("Cannot restore queue, mqd mgr is NULL\n");
- retval = -ENOMEM;
- goto out;
- }
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
q->properties.is_evicted = false;
q->properties.is_active = true;
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
@@ -812,10 +794,14 @@ static int register_process(struct device_queue_manager *dqm,
retval = dqm->asic_ops.update_qpd(dqm, qpd);
dqm->processes_count++;
- kfd_inc_compute_active(dqm->dev);
dqm_unlock(dqm);
+ /* Outside the DQM lock because under the DQM lock we can't do
+ * reclaim or take other locks that others hold while reclaiming.
+ */
+ kfd_inc_compute_active(dqm->dev);
+
return retval;
}
@@ -836,7 +822,6 @@ static int unregister_process(struct device_queue_manager *dqm,
list_del(&cur->list);
kfree(cur);
dqm->processes_count--;
- kfd_dec_compute_active(dqm->dev);
goto out;
}
}
@@ -844,6 +829,13 @@ static int unregister_process(struct device_queue_manager *dqm,
retval = 1;
out:
dqm_unlock(dqm);
+
+ /* Outside the DQM lock because under the DQM lock we can't do
+ * reclaim or take other locks that others hold while reclaiming.
+ */
+ if (!retval)
+ kfd_dec_compute_active(dqm->dev);
+
return retval;
}
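register_process()/unregister_process() above, and the two process-termination paths below, all make the same change: kfd_inc_compute_active()/kfd_dec_compute_active() may enter memory reclaim, so they must run after dqm_unlock(). The shape of the fix, schematically (not the literal kernel code):

    /* Schematic of the lock-ordering fix: record the decision
     * under dqm->lock, act on it only after unlocking.
     */
    static void unregister_example(struct device_queue_manager *dqm)
    {
    	bool found = false;

    	dqm_lock(dqm);
    	/* ... remove the process node; set found on success ... */
    	dqm_unlock(dqm);

    	/* may reclaim, so it must not run under dqm->lock */
    	if (found)
    		kfd_dec_compute_active(dqm->dev);
    }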
@@ -879,6 +871,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0;
+ dqm->xgmi_sdma_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
@@ -890,7 +883,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
}
dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
- dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+ dqm->sdma_bitmap = (1ULL << get_num_sdma_queues(dqm)) - 1;
+ dqm->xgmi_sdma_bitmap = (1ULL << get_num_xgmi_sdma_queues(dqm)) - 1;
return 0;
}
@@ -921,26 +915,56 @@ static int stop_nocpsch(struct device_queue_manager *dqm)
}
static int allocate_sdma_queue(struct device_queue_manager *dqm,
- unsigned int *sdma_queue_id)
+ struct queue *q)
{
int bit;
- if (dqm->sdma_bitmap == 0)
- return -ENOMEM;
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
+ if (dqm->sdma_bitmap == 0)
+ return -ENOMEM;
+ bit = __ffs64(dqm->sdma_bitmap);
+ dqm->sdma_bitmap &= ~(1ULL << bit);
+ q->sdma_id = bit;
+ q->properties.sdma_engine_id = q->sdma_id %
+ get_num_sdma_engines(dqm);
+ q->properties.sdma_queue_id = q->sdma_id /
+ get_num_sdma_engines(dqm);
+ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
+ if (dqm->xgmi_sdma_bitmap == 0)
+ return -ENOMEM;
+ bit = __ffs64(dqm->xgmi_sdma_bitmap);
+ dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
+ q->sdma_id = bit;
+ /* sdma_engine_id is a global SDMA engine id
+ * covering both PCIe-optimized and XGMI-
+ * optimized SDMAs. The calculation below
+ * assumes the first N engines are always
+ * the PCIe-optimized ones.
+ */
+ q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
+ q->sdma_id % get_num_xgmi_sdma_engines(dqm);
+ q->properties.sdma_queue_id = q->sdma_id /
+ get_num_xgmi_sdma_engines(dqm);
+ }
- bit = ffs(dqm->sdma_bitmap) - 1;
- dqm->sdma_bitmap &= ~(1 << bit);
- *sdma_queue_id = bit;
+ pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
+ pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
return 0;
}
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
- unsigned int sdma_queue_id)
+ struct queue *q)
{
- if (sdma_queue_id >= get_num_sdma_queues(dqm))
- return;
- dqm->sdma_bitmap |= (1 << sdma_queue_id);
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
+ if (q->sdma_id >= get_num_sdma_queues(dqm))
+ return;
+ dqm->sdma_bitmap |= (1ULL << q->sdma_id);
+ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
+ if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
+ return;
+ dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
+ }
}
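allocate_sdma_queue() above packs an engine index and a per-engine queue index into the single sdma_id via modulo and division by the engine count, with XGMI-optimized engines numbered after the PCIe-optimized ones. A standalone illustration of the arithmetic (engine counts assumed; compiles as plain C):

    /* Standalone demo of the sdma_id -> (engine, queue) mapping. */
    #include <stdio.h>

    int main(void)
    {
    	const unsigned int num_sdma = 2;	/* PCIe-optimized */
    	const unsigned int num_xgmi = 2;	/* XGMI-optimized */
    	unsigned int id;

    	for (id = 0; id < 4; id++)
    		printf("pcie sdma_id %u -> engine %u, queue %u\n",
    		       id, id % num_sdma, id / num_sdma);

    	/* XGMI engine ids start after the PCIe-optimized engines */
    	for (id = 0; id < 4; id++)
    		printf("xgmi sdma_id %u -> engine %u, queue %u\n",
    		       id, num_sdma + id % num_xgmi, id / num_xgmi);
    	return 0;
    }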
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
@@ -950,25 +974,16 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr;
int retval;
- mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
- if (!mqd_mgr)
- return -ENOMEM;
+ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA];
- retval = allocate_sdma_queue(dqm, &q->sdma_id);
+ retval = allocate_sdma_queue(dqm, q);
if (retval)
return retval;
- q->properties.sdma_queue_id = q->sdma_id / get_num_sdma_engines(dqm);
- q->properties.sdma_engine_id = q->sdma_id % get_num_sdma_engines(dqm);
-
retval = allocate_doorbell(qpd, q);
if (retval)
goto out_deallocate_sdma_queue;
- pr_debug("SDMA id is: %d\n", q->sdma_id);
- pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
- pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
-
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
@@ -987,7 +1002,7 @@ out_uninit_mqd:
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
- deallocate_sdma_queue(dqm, q->sdma_id);
+ deallocate_sdma_queue(dqm, q);
return retval;
}
@@ -1045,8 +1060,10 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->processes_count = 0;
dqm->sdma_queue_count = 0;
+ dqm->xgmi_sdma_queue_count = 0;
dqm->active_runlist = false;
- dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+ dqm->sdma_bitmap = (1ULL << get_num_sdma_queues(dqm)) - 1;
+ dqm->xgmi_sdma_bitmap = (1ULL << get_num_xgmi_sdma_queues(dqm)) - 1;
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
@@ -1161,38 +1178,26 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
int retval;
struct mqd_manager *mqd_mgr;
- retval = 0;
-
- dqm_lock(dqm);
-
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
- goto out_unlock;
+ goto out;
}
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- retval = allocate_sdma_queue(dqm, &q->sdma_id);
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
+ retval = allocate_sdma_queue(dqm, q);
if (retval)
- goto out_unlock;
- q->properties.sdma_queue_id =
- q->sdma_id / get_num_sdma_engines(dqm);
- q->properties.sdma_engine_id =
- q->sdma_id % get_num_sdma_engines(dqm);
+ goto out;
}
retval = allocate_doorbell(qpd, q);
if (retval)
goto out_deallocate_sdma_queue;
- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
- get_mqd_type_from_queue_type(q->properties.type));
-
- if (!mqd_mgr) {
- retval = -ENOMEM;
- goto out_deallocate_doorbell;
- }
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
/*
* Eviction state logic: we only mark active queues as evicted
* to avoid the overhead of restoring inactive queues later
@@ -1201,9 +1206,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
q->properties.is_evicted = (q->properties.queue_size > 0 &&
q->properties.queue_percent > 0 &&
q->properties.queue_address != 0);
-
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
-
q->properties.tba_addr = qpd->tba_addr;
q->properties.tma_addr = qpd->tma_addr;
retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
@@ -1211,6 +1214,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (retval)
goto out_deallocate_doorbell;
+ dqm_lock(dqm);
+
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active) {
@@ -1221,6 +1226,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
dqm->sdma_queue_count++;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
+ dqm->xgmi_sdma_queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
@@ -1236,11 +1243,10 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- deallocate_sdma_queue(dqm, q->sdma_id);
-out_unlock:
- dqm_unlock(dqm);
-
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
+ deallocate_sdma_queue(dqm, q);
+out:
return retval;
}
@@ -1268,12 +1274,18 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
return 0;
}
-static int unmap_sdma_queues(struct device_queue_manager *dqm,
- unsigned int sdma_engine)
+static int unmap_sdma_queues(struct device_queue_manager *dqm)
{
- return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
- sdma_engine);
+ int i, retval = 0;
+
+ for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
+ dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
+ retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
+ if (retval)
+ return retval;
+ }
+ return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
@@ -1309,13 +1321,11 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
return retval;
- pr_debug("Before destroying queues, sdma queue count is : %u\n",
- dqm->sdma_queue_count);
+ pr_debug("Before destroying queues, sdma queue count: %u, xgmi sdma queue count: %u\n",
+ dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
- if (dqm->sdma_queue_count > 0) {
- unmap_sdma_queues(dqm, 0);
- unmap_sdma_queues(dqm, 1);
- }
+ if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
+ unmap_sdma_queues(dqm);
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
filter, filter_param, false, 0);
@@ -1379,18 +1389,17 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
}
- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
- get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd_mgr) {
- retval = -ENOMEM;
- goto failed;
- }
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
deallocate_doorbell(qpd, q);
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
dqm->sdma_queue_count--;
- deallocate_sdma_queue(dqm, q->sdma_id);
+ deallocate_sdma_queue(dqm, q);
+ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
+ dqm->xgmi_sdma_queue_count--;
+ deallocate_sdma_queue(dqm, q);
}
list_del(&q->list);
@@ -1403,8 +1412,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = true;
}
- mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
-
/*
* Unconditionally decrement this counter, regardless of the queue's
* type
@@ -1415,9 +1422,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
dqm_unlock(dqm);
+ /* Do uninit_mqd after dqm_unlock(dqm) to avoid circular locking */
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+
return retval;
-failed:
failed_try_destroy_debugged_queue:
dqm_unlock(dqm);
@@ -1520,6 +1529,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct queue *q, *next;
struct device_process_node *cur, *next_dpn;
int retval = 0;
+ bool found = false;
dqm_lock(dqm);
@@ -1538,12 +1548,19 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
list_del(&cur->list);
kfree(cur);
dqm->processes_count--;
- kfd_dec_compute_active(dqm->dev);
+ found = true;
break;
}
}
dqm_unlock(dqm);
+
+ /* Outside the DQM lock because under the DQM lock we can't do
+ * reclaim or take other locks that others hold while reclaiming.
+ */
+ if (found)
+ kfd_dec_compute_active(dqm->dev);
+
return retval;
}
@@ -1564,11 +1581,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
goto dqm_unlock;
}
- mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (!mqd_mgr) {
- r = -ENOMEM;
- goto dqm_unlock;
- }
+ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];
if (!mqd_mgr->get_wave_state) {
r = -EINVAL;
@@ -1593,6 +1606,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
struct device_process_node *cur, *next_dpn;
enum kfd_unmap_queues_filter filter =
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
+ bool found = false;
retval = 0;
@@ -1611,7 +1625,10 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
dqm->sdma_queue_count--;
- deallocate_sdma_queue(dqm, q->sdma_id);
+ deallocate_sdma_queue(dqm, q);
+ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
+ dqm->xgmi_sdma_queue_count--;
+ deallocate_sdma_queue(dqm, q);
}
if (q->properties.is_active)
@@ -1626,7 +1643,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
list_del(&cur->list);
kfree(cur);
dqm->processes_count--;
- kfd_dec_compute_active(dqm->dev);
+ found = true;
break;
}
}
@@ -1638,21 +1655,68 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = false;
}
- /* lastly, free mqd resources */
+ dqm_unlock(dqm);
+
+ /* Outside the DQM lock because under the DQM lock we can't do
+ * reclaim or take other locks that others hold while reclaiming.
+ */
+ if (found)
+ kfd_dec_compute_active(dqm->dev);
+
+ /* Lastly, free mqd resources.
+ * Do uninit_mqd() after dqm_unlock to avoid circular locking.
+ */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
- mqd_mgr = dqm->ops.get_mqd_manager(dqm,
- get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd_mgr) {
- retval = -ENOMEM;
- goto out;
- }
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
list_del(&q->list);
qpd->queue_count--;
mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
}
-out:
- dqm_unlock(dqm);
+ return retval;
+}
+
+static int init_mqd_managers(struct device_queue_manager *dqm)
+{
+ int i, j;
+ struct mqd_manager *mqd_mgr;
+
+ for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
+ mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
+ if (!mqd_mgr) {
+ pr_err("mqd manager [%d] initialization failed\n", i);
+ goto out_free;
+ }
+ dqm->mqd_mgrs[i] = mqd_mgr;
+ }
+
+ return 0;
+
+out_free:
+ for (j = 0; j < i; j++) {
+ kfree(dqm->mqd_mgrs[j]);
+ dqm->mqd_mgrs[j] = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+/* Allocate one HIQ MQD (HWS) and all SDMA MQDs in one contiguous chunk */
+static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
+{
+ int retval;
+ struct kfd_dev *dev = dqm->dev;
+ struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
+ uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
+ dev->device_info->num_sdma_engines *
+ dev->device_info->num_sdma_queues_per_engine +
+ dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
+
+ retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
+ &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
+ (void *)&(mem_obj->cpu_ptr), true);
+
return retval;
}
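The size computed above packs one HIQ MQD followed by every SDMA MQD into a single GTT buffer. A standalone sketch of the same arithmetic, with hypothetical MQD sizes and engine counts standing in for the values the driver reads from the MQD managers and device info:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical sizes standing in for mqd_mgrs[...]->mqd_size. */
	#define HIQ_MQD_SIZE	512u
	#define SDMA_MQD_SIZE	128u

	int main(void)
	{
		uint32_t num_sdma_engines = 2;			/* assumed */
		uint32_t num_sdma_queues_per_engine = 8;	/* assumed */

		/* All SDMA MQDs back to back, plus one HIQ MQD. */
		uint32_t size = SDMA_MQD_SIZE * num_sdma_engines *
				num_sdma_queues_per_engine + HIQ_MQD_SIZE;

		printf("chunk size: %u bytes\n", size);	/* 128*16 + 512 = 2560 */
		return 0;
	}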
@@ -1693,7 +1757,6 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.stop = stop_cpsch;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
- dqm->ops.get_mqd_manager = get_mqd_manager;
dqm->ops.register_process = register_process;
dqm->ops.unregister_process = unregister_process;
dqm->ops.uninitialize = uninitialize;
@@ -1713,7 +1776,6 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
- dqm->ops.get_mqd_manager = get_mqd_manager;
dqm->ops.register_process = register_process;
dqm->ops.unregister_process = unregister_process;
dqm->ops.initialize = initialize_nocpsch;
@@ -1749,6 +1811,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
device_queue_manager_init_vi_tonga(&dqm->asic_ops);
break;
@@ -1764,6 +1827,14 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
goto out_free;
}
+ if (init_mqd_managers(dqm))
+ goto out_free;
+
+ if (allocate_hiq_sdma_mqd(dqm)) {
+ pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
+ goto out_free;
+ }
+
if (!dqm->ops.initialize(dqm))
return dqm;
@@ -1772,9 +1843,17 @@ out_free:
return NULL;
}
+void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, struct kfd_mem_obj *mqd)
+{
+ WARN(!mqd, "No hiq sdma mqd chunk to free");
+
+ amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
+}
+
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
dqm->ops.uninitialize(dqm);
+ deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
kfree(dqm);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 70e38a2e23b9..88b4c007696e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -48,8 +48,6 @@ struct device_process_node {
*
* @update_queue: Queue update routine.
*
- * @get_mqd_manager: Returns the mqd manager according to the mqd type.
- *
* @execute_queues: Dispatches the queues list to the H/W.
*
* @register_process: This routine associates a specific process with device.
@@ -97,10 +95,6 @@ struct device_queue_manager_ops {
int (*update_queue)(struct device_queue_manager *dqm,
struct queue *q);
- struct mqd_manager * (*get_mqd_manager)
- (struct device_queue_manager *dqm,
- enum KFD_MQD_TYPE type);
-
int (*register_process)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
@@ -158,6 +152,8 @@ struct device_queue_manager_asic_ops {
void (*init_sdma_vm)(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
+ struct mqd_manager * (*mqd_manager_init)(enum KFD_MQD_TYPE type,
+ struct kfd_dev *dev);
};
/**
@@ -185,10 +181,12 @@ struct device_queue_manager {
unsigned int processes_count;
unsigned int queue_count;
unsigned int sdma_queue_count;
+ unsigned int xgmi_sdma_queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
- unsigned int sdma_bitmap;
+ uint64_t sdma_bitmap;
+ uint64_t xgmi_sdma_bitmap;
unsigned int vmid_bitmap;
uint64_t pipelines_addr;
struct kfd_mem_obj *pipeline_mem;
@@ -201,6 +199,7 @@ struct device_queue_manager {
/* hw exception */
bool is_hws_hang;
struct work_struct hw_exception_work;
+ struct kfd_mem_obj hiq_sdma_mqd;
};
void device_queue_manager_init_cik(
@@ -219,6 +218,7 @@ unsigned int get_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
+unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index aed4c21417bf..0d26506798cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -48,6 +48,7 @@ void device_queue_manager_init_cik(
asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
asic_ops->update_qpd = update_qpd_cik;
asic_ops->init_sdma_vm = init_sdma_vm;
+ asic_ops->mqd_manager_init = mqd_manager_init_cik;
}
void device_queue_manager_init_cik_hawaii(
@@ -56,6 +57,7 @@ void device_queue_manager_init_cik_hawaii(
asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
asic_ops->update_qpd = update_qpd_cik_hawaii;
asic_ops->init_sdma_vm = init_sdma_vm_hawaii;
+ asic_ops->mqd_manager_init = mqd_manager_init_cik_hawaii;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 417515332c35..e9fe39382371 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -37,6 +37,7 @@ void device_queue_manager_init_v9(
{
asic_ops->update_qpd = update_qpd_v9;
asic_ops->init_sdma_vm = init_sdma_vm_v9;
+ asic_ops->mqd_manager_init = mqd_manager_init_v9;
}
static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index c3a5dcfe877a..3a7cb2f88366 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -54,6 +54,7 @@ void device_queue_manager_init_vi(
asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
asic_ops->update_qpd = update_qpd_vi;
asic_ops->init_sdma_vm = init_sdma_vm;
+ asic_ops->mqd_manager_init = mqd_manager_init_vi;
}
void device_queue_manager_init_vi_tonga(
@@ -62,6 +63,7 @@ void device_queue_manager_init_vi_tonga(
asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
asic_ops->update_qpd = update_qpd_vi_tonga;
asic_ops->init_sdma_vm = init_sdma_vm_tonga;
+ asic_ops->mqd_manager_init = mqd_manager_init_vi_tonga;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 6e1d41c5bf86..d674d4b3340f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -983,7 +983,7 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
return; /* Presumably process exited. */
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
memory_exception_data.gpu_id = dev->id;
- memory_exception_data.failure.imprecise = 1;
+ memory_exception_data.failure.imprecise = true;
/* Set failure reason */
if (info) {
memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 213ea5454d11..22a8e88b6a67 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -398,6 +398,7 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
kfd_init_apertures_vi(pdd, id);
break;
case CHIP_VEGA10:
@@ -435,5 +436,3 @@ int kfd_init_apertures(struct kfd_process *process)
return 0;
}
-
-
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index f1596881f20a..1cc03b3ddbb9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -58,9 +58,10 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->nop_packet = nop.u32all;
switch (type) {
case KFD_QUEUE_TYPE_DIQ:
+ kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_DIQ];
+ break;
case KFD_QUEUE_TYPE_HIQ:
- kq->mqd_mgr = dev->dqm->ops.get_mqd_manager(dev->dqm,
- KFD_MQD_TYPE_HIQ);
+ kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
break;
default:
pr_err("Invalid queue type %d\n", type);
@@ -314,6 +315,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
kernel_queue_init_vi(&kq->ops_asic_specific);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
index 33830b1a5a54..07f02f8e4fe4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
@@ -153,14 +153,13 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
sizeof(struct pm4_mes_map_queues));
- packet->bitfields2.alloc_format =
- alloc_format__mes_map_queues__one_per_pipe_vi;
packet->bitfields2.num_queues = 1;
packet->bitfields2.queue_sel =
queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
+ packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
packet->bitfields2.queue_type =
queue_type__mes_map_queues__normal_compute_vi;
@@ -175,6 +174,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
queue_type__mes_map_queues__debug_interface_queue_vi;
break;
case KFD_QUEUE_TYPE_SDMA:
+ case KFD_QUEUE_TYPE_SDMA_XGMI:
packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
engine_sel__mes_map_queues__sdma0_vi;
use_static = false; /* no static queues under SDMA */
@@ -221,6 +221,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
engine_sel__mes_unmap_queues__compute;
break;
case KFD_QUEUE_TYPE_SDMA:
+ case KFD_QUEUE_TYPE_SDMA_XGMI:
packet->bitfields2.engine_sel =
engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
index bf20c6d32ef3..2adaf40027eb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
@@ -190,8 +190,6 @@ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
sizeof(struct pm4_mes_map_queues));
- packet->bitfields2.alloc_format =
- alloc_format__mes_map_queues__one_per_pipe_vi;
packet->bitfields2.num_queues = 1;
packet->bitfields2.queue_sel =
queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
@@ -212,6 +210,7 @@ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
queue_type__mes_map_queues__debug_interface_queue_vi;
break;
case KFD_QUEUE_TYPE_SDMA:
+ case KFD_QUEUE_TYPE_SDMA_XGMI:
packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
engine_sel__mes_map_queues__sdma0_vi;
use_static = false; /* no static queues under SDMA */
@@ -258,6 +257,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
engine_sel__mes_unmap_queues__compute;
break;
case KFD_QUEUE_TYPE_SDMA:
+ case KFD_QUEUE_TYPE_SDMA_XGMI:
packet->bitfields2.engine_sel =
engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index aed9b9b82213..9307811bc427 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -23,34 +23,54 @@
#include "kfd_mqd_manager.h"
#include "amdgpu_amdkfd.h"
+#include "kfd_device_queue_manager.h"
-struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev)
{
- switch (dev->device_info->asic_family) {
- case CHIP_KAVERI:
- return mqd_manager_init_cik(type, dev);
- case CHIP_HAWAII:
- return mqd_manager_init_cik_hawaii(type, dev);
- case CHIP_CARRIZO:
- return mqd_manager_init_vi(type, dev);
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- return mqd_manager_init_vi_tonga(type, dev);
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- return mqd_manager_init_v9(type, dev);
- default:
- WARN(1, "Unexpected ASIC family %u",
- dev->device_info->asic_family);
- }
+ struct kfd_mem_obj *mqd_mem_obj = NULL;
+
+ mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+ if (!mqd_mem_obj)
+ return NULL;
+
+ mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;
+ mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;
+ mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;
+
+ return mqd_mem_obj;
+}
+
+struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
+ struct queue_properties *q)
+{
+ struct kfd_mem_obj *mqd_mem_obj = NULL;
+ uint64_t offset;
- return NULL;
+ mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+ if (!mqd_mem_obj)
+ return NULL;
+
+ offset = (q->sdma_engine_id *
+ dev->device_info->num_sdma_queues_per_engine +
+ q->sdma_queue_id) *
+ dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;
+
+ offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
+
+ mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem
+ + offset);
+ mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
+ mqd_mem_obj->cpu_ptr = (uint32_t *)((uint64_t)
+ dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);
+
+ return mqd_mem_obj;
+}
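Each SDMA queue's MQD thus lives at a fixed offset inside the shared chunk: past the single HIQ MQD, indexed by (engine, queue). A small sketch of the indexing, reusing the same hypothetical sizes as the chunk sketch above:

	#include <stdint.h>
	#include <stdio.h>

	#define HIQ_MQD_SIZE	512u	/* assumed */
	#define SDMA_MQD_SIZE	128u	/* assumed */

	/* Mirrors the offset math in allocate_sdma_mqd(): the HIQ MQD
	 * sits first, then the SDMA MQDs indexed by (engine, queue).
	 */
	static uint64_t sdma_mqd_offset(uint32_t engine_id, uint32_t queue_id,
					uint32_t queues_per_engine)
	{
		return (uint64_t)(engine_id * queues_per_engine + queue_id) *
		       SDMA_MQD_SIZE + HIQ_MQD_SIZE;
	}

	int main(void)
	{
		/* Engine 1, queue 3, 8 queues per engine (assumed). */
		printf("offset: %llu\n",
		       (unsigned long long)sdma_mqd_offset(1, 3, 8));
		/* (1*8 + 3) * 128 + 512 = 1920 */
		return 0;
	}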
+
+void uninit_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+{
+ WARN_ON(!mqd_mem_obj->gtt_mem);
+ kfree(mqd_mem_obj);
}
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index f8261313ae7b..56af256a191b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -99,8 +99,16 @@ struct mqd_manager {
struct mutex mqd_mutex;
struct kfd_dev *dev;
+ uint32_t mqd_size;
};
+struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev);
+
+struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
+ struct queue_properties *q);
+void uninit_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj);
+
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
const uint32_t *cu_mask, uint32_t cu_mask_count,
uint32_t *se_mask);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index ae90a99909ef..6e8509ec29d9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -66,6 +66,22 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se3);
}
+static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+ struct queue_properties *q)
+{
+ struct kfd_mem_obj *mqd_mem_obj;
+
+ if (q->type == KFD_QUEUE_TYPE_HIQ)
+ return allocate_hiq_mqd(kfd);
+
+ if (kfd_gtt_sa_allocate(kfd, sizeof(struct cik_mqd),
+ &mqd_mem_obj))
+ return NULL;
+
+ return mqd_mem_obj;
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -73,11 +89,10 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
uint64_t addr;
struct cik_mqd *m;
int retval;
+ struct kfd_dev *kfd = mm->dev;
- retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
- mqd_mem_obj);
-
- if (retval != 0)
+ *mqd_mem_obj = allocate_mqd(kfd, q);
+ if (!*mqd_mem_obj)
return -ENOMEM;
m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
@@ -136,12 +151,10 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
{
int retval;
struct cik_sdma_rlc_registers *m;
+ struct kfd_dev *dev = mm->dev;
- retval = kfd_gtt_sa_allocate(mm->dev,
- sizeof(struct cik_sdma_rlc_registers),
- mqd_mem_obj);
-
- if (retval != 0)
+ *mqd_mem_obj = allocate_sdma_mqd(dev, q);
+ if (!*mqd_mem_obj)
return -ENOMEM;
m = (struct cik_sdma_rlc_registers *) (*mqd_mem_obj)->cpu_ptr;
@@ -163,11 +176,6 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
-static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct kfd_mem_obj *mqd_mem_obj)
-{
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
-}
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
uint32_t queue_id, struct queue_properties *p,
@@ -400,28 +408,43 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+ mqd->mqd_size = sizeof(struct cik_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_HIQ:
mqd->init_mqd = init_mqd_hiq;
+ mqd->uninit_mqd = uninit_mqd_hiq_sdma;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ mqd->mqd_size = sizeof(struct cik_mqd);
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
+ break;
+ case KFD_MQD_TYPE_DIQ:
+ mqd->init_mqd = init_mqd_hiq;
mqd->uninit_mqd = uninit_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+ mqd->mqd_size = sizeof(struct cik_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_SDMA:
mqd->init_mqd = init_mqd_sdma;
- mqd->uninit_mqd = uninit_mqd_sdma;
+ mqd->uninit_mqd = uninit_mqd_hiq_sdma;
mqd->load_mqd = load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = destroy_mqd_sdma;
mqd->is_occupied = is_occupied_sdma;
+ mqd->mqd_size = sizeof(struct cik_sdma_rlc_registers);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 9dbba609450e..4750338199b6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -67,33 +67,54 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se3);
}
-static int init_mqd(struct mqd_manager *mm, void **mqd,
- struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
- struct queue_properties *q)
+static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+ struct queue_properties *q)
{
int retval;
- uint64_t addr;
- struct v9_mqd *m;
- struct kfd_dev *kfd = mm->dev;
+ struct kfd_mem_obj *mqd_mem_obj = NULL;
+
+ if (q->type == KFD_QUEUE_TYPE_HIQ)
+ return allocate_hiq_mqd(kfd);
/* From V9 on, for CWSR, the control stack is located on the next page
 * boundary after the MQD, so we use the GTT allocation function
 * instead of the sub-allocation function.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
- if (!*mqd_mem_obj)
- return -ENOMEM;
+ mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+ if (!mqd_mem_obj)
+ return NULL;
retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
ALIGN(q->ctl_stack_size, PAGE_SIZE) +
ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
- &((*mqd_mem_obj)->gtt_mem),
- &((*mqd_mem_obj)->gpu_addr),
- (void *)&((*mqd_mem_obj)->cpu_ptr), true);
- } else
- retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
- mqd_mem_obj);
- if (retval != 0)
+ &(mqd_mem_obj->gtt_mem),
+ &(mqd_mem_obj->gpu_addr),
+ (void *)&(mqd_mem_obj->cpu_ptr), true);
+ } else {
+ retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
+ &mqd_mem_obj);
+ }
+
+ if (retval) {
+ kfree(mqd_mem_obj);
+ return NULL;
+ }
+
+ return mqd_mem_obj;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ int retval;
+ uint64_t addr;
+ struct v9_mqd *m;
+ struct kfd_dev *kfd = mm->dev;
+
+ *mqd_mem_obj = allocate_mqd(kfd, q);
+ if (!*mqd_mem_obj)
return -ENOMEM;
m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr;
@@ -328,13 +349,10 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
{
int retval;
struct v9_sdma_mqd *m;
+ struct kfd_dev *dev = mm->dev;
-
- retval = kfd_gtt_sa_allocate(mm->dev,
- sizeof(struct v9_sdma_mqd),
- mqd_mem_obj);
-
- if (retval != 0)
+ *mqd_mem_obj = allocate_sdma_mqd(dev, q);
+ if (!*mqd_mem_obj)
return -ENOMEM;
m = (struct v9_sdma_mqd *) (*mqd_mem_obj)->cpu_ptr;
@@ -350,12 +368,6 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
return retval;
}
-static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct kfd_mem_obj *mqd_mem_obj)
-{
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
-}
-
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
@@ -459,28 +471,43 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->get_wave_state = get_wave_state;
+ mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_HIQ:
mqd->init_mqd = init_mqd_hiq;
+ mqd->uninit_mqd = uninit_mqd_hiq_sdma;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ mqd->mqd_size = sizeof(struct v9_mqd);
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
+ break;
+ case KFD_MQD_TYPE_DIQ:
+ mqd->init_mqd = init_mqd_hiq;
mqd->uninit_mqd = uninit_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+ mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_SDMA:
mqd->init_mqd = init_mqd_sdma;
- mqd->uninit_mqd = uninit_mqd_sdma;
+ mqd->uninit_mqd = uninit_mqd_hiq_sdma;
mqd->load_mqd = load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = destroy_mqd_sdma;
mqd->is_occupied = is_occupied_sdma;
+ mqd->mqd_size = sizeof(struct v9_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 6469b3456f00..b550dea9b10a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -68,6 +68,21 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se3);
}
+static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+ struct queue_properties *q)
+{
+ struct kfd_mem_obj *mqd_mem_obj;
+
+ if (q->type == KFD_QUEUE_TYPE_HIQ)
+ return allocate_hiq_mqd(kfd);
+
+ if (kfd_gtt_sa_allocate(kfd, sizeof(struct vi_mqd),
+ &mqd_mem_obj))
+ return NULL;
+
+ return mqd_mem_obj;
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -75,10 +90,10 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
int retval;
uint64_t addr;
struct vi_mqd *m;
+ struct kfd_dev *kfd = mm->dev;
- retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct vi_mqd),
- mqd_mem_obj);
- if (retval != 0)
+ *mqd_mem_obj = allocate_mqd(kfd, q);
+ if (!*mqd_mem_obj)
return -ENOMEM;
m = (struct vi_mqd *) (*mqd_mem_obj)->cpu_ptr;
@@ -329,13 +344,10 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
{
int retval;
struct vi_sdma_mqd *m;
+ struct kfd_dev *dev = mm->dev;
-
- retval = kfd_gtt_sa_allocate(mm->dev,
- sizeof(struct vi_sdma_mqd),
- mqd_mem_obj);
-
- if (retval != 0)
+ *mqd_mem_obj = allocate_sdma_mqd(dev, q);
+ if (!*mqd_mem_obj)
return -ENOMEM;
m = (struct vi_sdma_mqd *) (*mqd_mem_obj)->cpu_ptr;
@@ -343,7 +355,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
memset(m, 0, sizeof(struct vi_sdma_mqd));
*mqd = m;
- if (gart_addr != NULL)
+ if (gart_addr)
*gart_addr = (*mqd_mem_obj)->gpu_addr;
retval = mm->update_mqd(mm, m, q);
@@ -351,12 +363,6 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
return retval;
}
-static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct kfd_mem_obj *mqd_mem_obj)
-{
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
-}
-
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
@@ -459,28 +465,43 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->get_wave_state = get_wave_state;
+ mqd->mqd_size = sizeof(struct vi_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_HIQ:
mqd->init_mqd = init_mqd_hiq;
+ mqd->uninit_mqd = uninit_mqd_hiq_sdma;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd_hiq;
+ mqd->destroy_mqd = destroy_mqd;
+ mqd->is_occupied = is_occupied;
+ mqd->mqd_size = sizeof(struct vi_mqd);
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
+ break;
+ case KFD_MQD_TYPE_DIQ:
+ mqd->init_mqd = init_mqd_hiq;
mqd->uninit_mqd = uninit_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+ mqd->mqd_size = sizeof(struct vi_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_SDMA:
mqd->init_mqd = init_mqd_sdma;
- mqd->uninit_mqd = uninit_mqd_sdma;
+ mqd->uninit_mqd = uninit_mqd_hiq_sdma;
mqd->load_mqd = load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = destroy_mqd_sdma;
mqd->is_occupied = is_occupied_sdma;
+ mqd->mqd_size = sizeof(struct vi_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 045a229436a0..808194663a7d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -48,7 +48,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->queue_count;
- compute_queue_count = queue_count - pm->dqm->sdma_queue_count;
+ compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
+ pm->dqm->xgmi_sdma_queue_count;
/* check if there is over subscription
* Note: the arbitration between the number of VMIDs and
@@ -227,6 +228,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
pm->pmf = &kfd_vi_pm_funcs;
break;
case CHIP_VEGA10:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index f2bcf5c092ea..49ab66b703fa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -176,8 +176,7 @@ struct pm4_mes_map_process {
union {
struct {
- uint32_t num_gws:6;
- uint32_t reserved7:1;
+ uint32_t num_gws:7;
uint32_t sdma_enable:1;
uint32_t num_oac:4;
uint32_t reserved8:4;
@@ -255,11 +254,6 @@ enum mes_map_queues_queue_type_enum {
queue_type__mes_map_queues__low_latency_static_queue_vi = 3
};
-enum mes_map_queues_alloc_format_enum {
- alloc_format__mes_map_queues__one_per_pipe_vi = 0,
-alloc_format__mes_map_queues__all_on_one_pipe_vi = 1
-};
-
enum mes_map_queues_engine_sel_enum {
engine_sel__mes_map_queues__compute_vi = 0,
engine_sel__mes_map_queues__sdma0_vi = 2,
@@ -277,9 +271,11 @@ struct pm4_mes_map_queues {
struct {
uint32_t reserved1:4;
enum mes_map_queues_queue_sel_enum queue_sel:2;
- uint32_t reserved2:15;
+ uint32_t reserved5:6;
+ uint32_t gws_control_queue:1;
+ uint32_t reserved2:8;
enum mes_map_queues_queue_type_enum queue_type:3;
- enum mes_map_queues_alloc_format_enum alloc_format:2;
+ uint32_t reserved3:2;
enum mes_map_queues_engine_sel_enum engine_sel:3;
uint32_t num_queues:3;
} bitfields2;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
index 7c8d9b357749..5466cfe1c3cc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
@@ -216,11 +216,6 @@ enum mes_map_queues_queue_type_vi_enum {
queue_type__mes_map_queues__low_latency_static_queue_vi = 3
};
-enum mes_map_queues_alloc_format_vi_enum {
- alloc_format__mes_map_queues__one_per_pipe_vi = 0,
-alloc_format__mes_map_queues__all_on_one_pipe_vi = 1
-};
-
enum mes_map_queues_engine_sel_vi_enum {
engine_sel__mes_map_queues__compute_vi = 0,
engine_sel__mes_map_queues__sdma0_vi = 2,
@@ -240,7 +235,7 @@ struct pm4_mes_map_queues {
enum mes_map_queues_queue_sel_vi_enum queue_sel:2;
uint32_t reserved2:15;
enum mes_map_queues_queue_type_vi_enum queue_type:3;
- enum mes_map_queues_alloc_format_vi_enum alloc_format:2;
+ uint32_t reserved3:2;
enum mes_map_queues_engine_sel_vi_enum engine_sel:3;
uint32_t num_queues:3;
} bitfields2;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 487d5da337c1..b61dc53f42d2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -59,6 +59,7 @@
#define KFD_MMAP_TYPE_DOORBELL (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS (0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM (0x1ULL << KFD_MMAP_TYPE_SHIFT)
+#define KFD_MMAP_TYPE_MMIO (0x0ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT)
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
@@ -160,6 +161,11 @@ extern int noretry;
*/
extern int halt_if_hws_hang;
+/*
+ * Whether MEC FW support GWS barriers
+ */
+extern bool hws_gws_support;
+
enum cache_policy {
cache_policy_coherent,
cache_policy_noncoherent
@@ -188,6 +194,7 @@ struct kfd_device_info {
bool needs_iommu_device;
bool needs_pci_atomics;
unsigned int num_sdma_engines;
+ unsigned int num_xgmi_sdma_engines;
unsigned int num_sdma_queues_per_engine;
};
@@ -258,7 +265,7 @@ struct kfd_dev {
bool interrupts_active;
/* Debug manager */
- struct kfd_dbgmgr *dbgmgr;
+ struct kfd_dbgmgr *dbgmgr;
/* Firmware versions */
uint16_t mec_fw_version;
@@ -282,6 +289,9 @@ struct kfd_dev {
/* Compute Profile ref. count */
atomic_t compute_profile;
+
+ /* Global GWS resource shared between processes */
+ void *gws;
};
enum kfd_mempool {
@@ -329,7 +339,8 @@ enum kfd_queue_type {
KFD_QUEUE_TYPE_COMPUTE,
KFD_QUEUE_TYPE_SDMA,
KFD_QUEUE_TYPE_HIQ,
- KFD_QUEUE_TYPE_DIQ
+ KFD_QUEUE_TYPE_DIQ,
+ KFD_QUEUE_TYPE_SDMA_XGMI
};
enum kfd_queue_format {
@@ -444,6 +455,9 @@ struct queue_properties {
*
* @device: The kfd device that created this queue.
*
+ * @gws: Points to the gws kgd_mem if this is a GWS control queue; NULL
+ * otherwise.
+ *
* This structure represents user mode compute queues.
* It contains all the necessary data to handle such queues.
*
@@ -465,6 +479,7 @@ struct queue {
struct kfd_process *process;
struct kfd_dev *device;
+ void *gws;
};
/*
@@ -475,6 +490,7 @@ enum KFD_MQD_TYPE {
KFD_MQD_TYPE_HIQ, /* for hiq */
KFD_MQD_TYPE_CP, /* for cp queues and diq */
KFD_MQD_TYPE_SDMA, /* for sdma queues */
+ KFD_MQD_TYPE_DIQ, /* for diq */
KFD_MQD_TYPE_MAX
};
@@ -819,8 +835,6 @@ void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);
-struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
@@ -859,6 +873,8 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
+int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
+ void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index fcaaf93681ac..da0958625861 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -26,6 +26,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
+#include "amdgpu_amdkfd.h"
static inline struct process_queue_node *get_queue_by_qid(
struct process_queue_manager *pqm, unsigned int qid)
@@ -74,6 +75,55 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
pdd->already_dequeued = true;
}
+int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
+ void *gws)
+{
+ struct kfd_dev *dev = NULL;
+ struct process_queue_node *pqn;
+ struct kfd_process_device *pdd;
+ struct kgd_mem *mem = NULL;
+ int ret;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (!pqn) {
+ pr_err("Queue id does not match any known queue\n");
+ return -EINVAL;
+ }
+
+ if (pqn->q)
+ dev = pqn->q->device;
+ if (WARN_ON(!dev))
+ return -ENODEV;
+
+ pdd = kfd_get_process_device_data(dev, pqm->process);
+ if (!pdd) {
+ pr_err("Process device data doesn't exist\n");
+ return -EINVAL;
+ }
+
+ /* Only one queue per process may have GWS assigned */
+ if (gws && pdd->qpd.num_gws)
+ return -EBUSY;
+
+ if (!gws && pdd->qpd.num_gws == 0)
+ return -EINVAL;
+
+ if (gws)
+ ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
+ gws, &mem);
+ else
+ ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
+ pqn->q->gws);
+ if (unlikely(ret))
+ return ret;
+
+ pqn->q->gws = mem;
+ pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0;
+
+ return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
+ pqn->q);
+}
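The two early returns above enforce that at most one queue per process holds GWS: attaching while another queue already holds it fails with -EBUSY, and detaching when nothing is attached fails with -EINVAL. A compact, illustrative restatement of that guard:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* attaching == true models gws != NULL; num_gws is the count of
	 * GWS entries the process already has assigned (0 if none).
	 */
	static int check_gws_transition(bool attaching, unsigned int num_gws)
	{
		if (attaching && num_gws)	/* a queue already holds GWS */
			return -EBUSY;
		if (!attaching && !num_gws)	/* nothing to detach */
			return -EINVAL;
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_gws_transition(true, 0));	/* 0: attach ok */
		printf("%d\n", check_gws_transition(true, 64));	/* -EBUSY */
		printf("%d\n", check_gws_transition(false, 0));	/* -EINVAL */
		return 0;
	}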
+
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
struct kfd_process_device *pdd;
@@ -186,8 +236,13 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
- if (dev->dqm->queue_count >= get_num_sdma_queues(dev->dqm)) {
- pr_err("Over-subscription is not allowed for SDMA.\n");
+ case KFD_QUEUE_TYPE_SDMA_XGMI:
+ if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
+ >= get_num_sdma_queues(dev->dqm)) ||
+ (type == KFD_QUEUE_TYPE_SDMA_XGMI &&
+ dev->dqm->xgmi_sdma_queue_count
+ >= get_num_xgmi_sdma_queues(dev->dqm))) {
+ pr_debug("Over-subscription is not allowed for SDMA.\n");
retval = -EPERM;
goto err_create_queue;
}
@@ -325,6 +380,13 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (retval != -ETIME)
goto err_destroy_queue;
}
+
+ if (pqn->q->gws) {
+ amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
+ pqn->q->gws);
+ pdd->qpd.num_gws = 0;
+ }
+
kfree(pqn->q->properties.cu_mask);
pqn->q->properties.cu_mask = NULL;
uninit_queue(pqn->q);
@@ -446,6 +508,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
q = pqn->q;
switch (q->properties.type) {
case KFD_QUEUE_TYPE_SDMA:
+ case KFD_QUEUE_TYPE_SDMA_XGMI:
seq_printf(m, " SDMA queue on device %x\n",
q->device->id);
mqd_type = KFD_MQD_TYPE_SDMA;
@@ -461,8 +524,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
q->properties.type, q->device->id);
continue;
}
- mqd_mgr = q->device->dqm->ops.get_mqd_manager(
- q->device->dqm, mqd_type);
+ mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
} else if (pqn->kq) {
q = pqn->kq->queue;
mqd_mgr = pqn->kq->mqd_mgr;
@@ -470,7 +532,6 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
case KFD_QUEUE_TYPE_DIQ:
seq_printf(m, " DIQ on device %x\n",
pqn->kq->dev->id);
- mqd_type = KFD_MQD_TYPE_HIQ;
break;
default:
seq_printf(m,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 769dbc7be8cb..d241a8672599 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -454,6 +454,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.lds_size_in_kb);
sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
dev->node_props.gds_size_in_kb);
+ sysfs_show_32bit_prop(buffer, "num_gws",
+ dev->node_props.num_gws);
sysfs_show_32bit_prop(buffer, "wave_front_size",
dev->node_props.wave_front_size);
sysfs_show_32bit_prop(buffer, "array_count",
@@ -476,6 +478,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.drm_render_minor);
sysfs_show_64bit_prop(buffer, "hive_id",
dev->node_props.hive_id);
+ sysfs_show_32bit_prop(buffer, "num_sdma_engines",
+ dev->node_props.num_sdma_engines);
+ sysfs_show_32bit_prop(buffer, "num_sdma_xgmi_engines",
+ dev->node_props.num_sdma_xgmi_engines);
if (dev->gpu) {
log_max_watch_addr =
@@ -1078,8 +1084,9 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
local_mem_info.local_mem_size_public;
buf[0] = gpu->pdev->devfn;
- buf[1] = gpu->pdev->subsystem_vendor;
- buf[2] = gpu->pdev->subsystem_device;
+ buf[1] = gpu->pdev->subsystem_vendor |
+ (gpu->pdev->subsystem_device << 16);
+ buf[2] = pci_domain_nr(gpu->pdev->bus);
buf[3] = gpu->pdev->device;
buf[4] = gpu->pdev->bus->number;
buf[5] = lower_32_bits(local_mem_size);
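The change above folds the two 16-bit subsystem IDs into one 32-bit hash input word, freeing a slot for the PCI domain number so that GPUs in different domains hash to distinct IDs. A sketch of the packing, with illustrative ID values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t subsystem_vendor = 0x1002;	/* assumed example */
		uint16_t subsystem_device = 0x0b0c;	/* assumed example */

		/* Both 16-bit IDs packed into one 32-bit hash input word. */
		uint32_t word = (uint32_t)subsystem_vendor |
				((uint32_t)subsystem_device << 16);

		printf("0x%08x\n", word);	/* 0x0b0c1002 */
		return 0;
	}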
@@ -1281,6 +1288,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
gpu->shared_resources.drm_render_minor;
dev->node_props.hive_id = gpu->hive_id;
+ dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines;
+ dev->node_props.num_sdma_xgmi_engines =
+ gpu->device_info->num_xgmi_sdma_engines;
+ dev->node_props.num_gws = (hws_gws_support &&
+ dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
+ amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
@@ -1298,6 +1311,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
pr_debug("Adding doorbell packet type capability\n");
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 84710cfd23c2..276354aa0fcc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -65,6 +65,7 @@ struct kfd_node_properties {
uint32_t max_waves_per_simd;
uint32_t lds_size_in_kb;
uint32_t gds_size_in_kb;
+ uint32_t num_gws;
uint32_t wave_front_size;
uint32_t array_count;
uint32_t simd_arrays_per_engine;
@@ -78,6 +79,8 @@ struct kfd_node_properties {
uint32_t max_engine_clk_fcompute;
uint32_t max_engine_clk_ccompute;
int32_t drm_render_minor;
+ uint32_t num_sdma_engines;
+ uint32_t num_sdma_xgmi_engines;
uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 0c25baded852..5c826faae240 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,6 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
- select DRM_AMD_DC_DCN1_01 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
@@ -17,11 +16,6 @@ config DRM_AMD_DC_DCN1_0
help
RV family support for display engine
-config DRM_AMD_DC_DCN1_01
- def_bool n
- help
- RV2 family for display engine
-
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index cfde1568c79a..496cee000f10 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -28,6 +28,7 @@ AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/clk_mgr
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ab7c5c3004ee..b16c658074d2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -54,15 +54,17 @@
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
+#include <linux/pci.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_vblank.h>
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"
@@ -616,6 +618,10 @@ error:
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
amdgpu_dm_destroy_drm_device(&adev->dm);
+
+ /* DC Destroy TODO: Replace destroy DAL */
+ if (adev->dm.dc)
+ dc_destroy(&adev->dm.dc);
/*
* TODO: pageflip, vlank interrupt
*
@@ -630,9 +636,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
mod_freesync_destroy(adev->dm.freesync_module);
adev->dm.freesync_module = NULL;
}
- /* DC Destroy TODO: Replace destroy DAL */
- if (adev->dm.dc)
- dc_destroy(&adev->dm.dc);
mutex_destroy(&adev->dm.dc_lock);
@@ -664,13 +667,11 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA20:
return 0;
case CHIP_RAVEN:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
if (ASICREV_IS_PICASSO(adev->external_rev_id))
fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
else
-#endif
return 0;
break;
default:
@@ -2592,7 +2593,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
address->type = PLN_ADDR_TYPE_GRAPHICS;
address->grph.addr.low_part = lower_32_bits(afb->address);
address->grph.addr.high_part = upper_32_bits(afb->address);
- } else {
+ } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
uint64_t chroma_addr = afb->address + fb->offsets[1];
plane_size->video.luma_size.x = 0;
@@ -2967,16 +2968,16 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
}
static enum dc_color_depth
-convert_color_depth_from_display_info(const struct drm_connector *connector)
+convert_color_depth_from_display_info(const struct drm_connector *connector,
+ const struct drm_connector_state *state)
{
- struct dm_connector_state *dm_conn_state =
- to_dm_connector_state(connector->state);
uint32_t bpc = connector->display_info.bpc;
- /* TODO: Remove this when there's support for max_bpc in drm */
- if (dm_conn_state && bpc > dm_conn_state->max_bpc)
- /* Round down to nearest even number. */
- bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
+ if (state) {
+ bpc = state->max_bpc;
+ /* Round down to the nearest even number. */
+ bpc = bpc - (bpc & 1);
+ }
switch (bpc) {
case 0:
@@ -3094,11 +3095,12 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
}
-static void
-fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
- const struct drm_display_mode *mode_in,
- const struct drm_connector *connector,
- const struct dc_stream_state *old_stream)
+static void fill_stream_properties_from_drm_display_mode(
+ struct dc_stream_state *stream,
+ const struct drm_display_mode *mode_in,
+ const struct drm_connector *connector,
+ const struct drm_connector_state *connector_state,
+ const struct dc_stream_state *old_stream)
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
@@ -3121,7 +3123,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
timing_out->display_color_depth = convert_color_depth_from_display_info(
- connector);
+ connector, connector_state);
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
@@ -3318,6 +3320,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
{
struct drm_display_mode *preferred_mode = NULL;
struct drm_connector *drm_connector;
+ const struct drm_connector_state *con_state =
+ dm_state ? &dm_state->base : NULL;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
@@ -3390,10 +3394,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base, NULL);
+ &mode, &aconnector->base, con_state, NULL);
else
fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base, old_stream);
+ &mode, &aconnector->base, con_state, old_stream);
update_stream_scaling_settings(&mode, dm_state, stream);
@@ -3618,9 +3622,6 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
- } else if (property == adev->mode_info.max_bpc_property) {
- dm_new_state->max_bpc = val;
- ret = 0;
} else if (property == adev->mode_info.abm_level_property) {
dm_new_state->abm_level = val;
ret = 0;
@@ -3666,9 +3667,6 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
- } else if (property == adev->mode_info.max_bpc_property) {
- *val = dm_state->max_bpc;
- ret = 0;
} else if (property == adev->mode_info.abm_level_property) {
*val = dm_state->abm_level;
ret = 0;
@@ -3677,6 +3675,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
return ret;
}
+static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+ drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
+}
+
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -3705,6 +3710,11 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
+ if (aconnector->i2c) {
+ i2c_del_adapter(&aconnector->i2c->base);
+ kfree(aconnector->i2c);
+ }
+
kfree(connector);
}
@@ -3725,7 +3735,6 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_enable = false;
state->underscan_hborder = 0;
state->underscan_vborder = 0;
- state->max_bpc = 8;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@@ -3751,7 +3760,6 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
new_state->underscan_enable = state->underscan_enable;
new_state->underscan_hborder = state->underscan_hborder;
new_state->underscan_vborder = state->underscan_vborder;
- new_state->max_bpc = state->max_bpc;
return &new_state->base;
}
@@ -3764,7 +3772,8 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
- .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .early_unregister = amdgpu_dm_connector_unregister
};
static int get_modes(struct drm_connector *connector)
@@ -3879,6 +3888,129 @@ fail:
return result;
}
+static int fill_hdr_info_packet(const struct drm_connector_state *state,
+ struct dc_info_packet *out)
+{
+ struct hdmi_drm_infoframe frame;
+ unsigned char buf[30]; /* 26 + 4 */
+ ssize_t len;
+ int ret, i;
+
+ memset(out, 0, sizeof(*out));
+
+ if (!state->hdr_output_metadata)
+ return 0;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
+ if (ret)
+ return ret;
+
+ len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
+ if (len < 0)
+ return (int)len;
+
+ /* Static metadata is a fixed 26-byte payload plus a 4-byte header. */
+ if (len != 30)
+ return -EINVAL;
+
+ /* Prepare the infopacket for DC. */
+ switch (state->connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ out->hb0 = 0x87; /* type */
+ out->hb1 = 0x01; /* version */
+ out->hb2 = 0x1A; /* length */
+ out->sb[0] = buf[3]; /* checksum */
+ i = 1;
+ break;
+
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ out->hb0 = 0x00; /* sdp id, zero */
+ out->hb1 = 0x87; /* type */
+ out->hb2 = 0x1D; /* payload len - 1 */
+ out->hb3 = (0x13 << 2); /* sdp version */
+ out->sb[0] = 0x01; /* version */
+ out->sb[1] = 0x1A; /* length */
+ i = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(&out->sb[i], &buf[4], 26);
+ out->valid = true;
+
+ print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
+ sizeof(out->sb), false);
+
+ return 0;
+}
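For the HDMI branch, the 30-byte buffer from hdmi_drm_infoframe_pack_only() is a 4-byte header (type, version, length, checksum) followed by the 26-byte static-metadata payload, and only the checksum plus payload are carried into the DC packet. A minimal sketch of that repacking, with an illustrative stand-in for struct dc_info_packet:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct info_packet {			/* stand-in for dc_info_packet */
		uint8_t hb0, hb1, hb2, hb3;
		uint8_t sb[32];
	};

	/* buf is the 30-byte output of hdmi_drm_infoframe_pack_only():
	 * a 4-byte header (type, version, length, checksum) followed by
	 * the 26-byte static-metadata payload.
	 */
	static void repack_hdmi(const uint8_t buf[30], struct info_packet *out)
	{
		memset(out, 0, sizeof(*out));
		out->hb0 = 0x87;		/* type: Dynamic Range and Mastering */
		out->hb1 = 0x01;		/* version */
		out->hb2 = 0x1A;		/* payload length = 26 */
		out->sb[0] = buf[3];		/* checksum from the packed header */
		memcpy(&out->sb[1], &buf[4], 26);
	}

	int main(void)
	{
		uint8_t buf[30] = { 0x87, 0x01, 0x1A, 0x42 /* checksum */ };
		struct info_packet pkt;

		repack_hdmi(buf, &pkt);
		printf("hb2=0x%02x sb0=0x%02x\n", pkt.hb2, pkt.sb[0]);
		return 0;
	}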
+
+static bool
+is_hdr_metadata_different(const struct drm_connector_state *old_state,
+ const struct drm_connector_state *new_state)
+{
+ struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
+ struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
+
+ if (old_blob != new_blob) {
+ if (old_blob && new_blob &&
+ old_blob->length == new_blob->length)
+ return memcmp(old_blob->data, new_blob->data,
+ old_blob->length);
+
+ return true;
+ }
+
+ return false;
+}
+
+static int
+amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *new_con_state =
+ drm_atomic_get_new_connector_state(state, conn);
+ struct drm_connector_state *old_con_state =
+ drm_atomic_get_old_connector_state(state, conn);
+ struct drm_crtc *crtc = new_con_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ int ret;
+
+ if (!crtc)
+ return 0;
+
+ if (is_hdr_metadata_different(old_con_state, new_con_state)) {
+ struct dc_info_packet hdr_infopacket;
+
+ ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
+ if (ret)
+ return ret;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ /*
+ * DC considers the stream backends changed if the
+ * static metadata changes. Forcing the modeset also
+ * gives a simple way for userspace to switch from
+ * 8bpc to 10bpc when setting the metadata to enter
+ * or exit HDR.
+ *
+ * Changing the static metadata after it's been
+ * set is permissible, however. So only force a
+ * modeset if we're entering or exiting HDR.
+ */
+ new_crtc_state->mode_changed =
+ !old_con_state->hdr_output_metadata ||
+ !new_con_state->hdr_output_metadata;
+ }
+
+ return 0;
+}
+
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
/*
@@ -3889,6 +4021,7 @@ amdgpu_dm_connector_helper_funcs = {
*/
.get_modes = get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
+ .atomic_check = amdgpu_dm_connector_atomic_check,
};
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
@@ -4098,6 +4231,9 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
struct amdgpu_device *adev;
struct amdgpu_bo *rbo;
struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
+ struct list_head list;
+ struct ttm_validate_buffer tv;
+ struct ww_acquire_ctx ticket;
uint64_t tiling_flags;
uint32_t domain;
int r;
@@ -4114,9 +4250,17 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
obj = new_state->fb->obj[0];
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
- r = amdgpu_bo_reserve(rbo, false);
- if (unlikely(r != 0))
+ INIT_LIST_HEAD(&list);
+
+ tv.bo = &rbo->tbo;
+ tv.num_shared = 1;
+ list_add(&tv.head, &list);
+
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+ if (r) {
+ dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
+ }
if (plane->type != DRM_PLANE_TYPE_CURSOR)
domain = amdgpu_display_supported_domains(adev);
@@ -4127,21 +4271,21 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
- amdgpu_bo_unreserve(rbo);
+ ttm_eu_backoff_reservation(&ticket, &list);
return r;
}
r = amdgpu_ttm_alloc_gart(&rbo->tbo);
if (unlikely(r != 0)) {
amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ ttm_eu_backoff_reservation(&ticket, &list);
DRM_ERROR("%p bind failed\n", rbo);
return r;
}
amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
- amdgpu_bo_unreserve(rbo);
+ ttm_eu_backoff_reservation(&ticket, &list);
afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -4592,6 +4736,15 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
amdgpu_dm_connector->num_modes =
drm_add_edid_modes(connector, edid);
+ /* Sort the probed modes before calling
+ * amdgpu_dm_get_native_mode(), since an EDID can carry
+ * more than one preferred mode. Modes later in the probed
+ * list may have a higher preferred resolution: for example,
+ * 3840x2160 in the base EDID preferred timing and 4096x2160
+ * in a later DID extension block.
+ */
+ drm_mode_sort(&connector->probed_modes);
amdgpu_dm_get_native_mode(connector);
} else {
amdgpu_dm_connector->num_modes = 0;
@@ -4671,9 +4824,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.underscan_vborder_property,
0);
- drm_object_attach_property(&aconnector->base.base,
- adev->mode_info.max_bpc_property,
- 0);
+
+ drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+
+ /* This defaults to the max in the range, but we want 8bpc. */
+ aconnector->base.state->max_bpc = 8;
+ aconnector->base.state->max_requested_bpc = 8;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
dc_is_dmcu_initialized(adev->dm.dc)) {
@@ -4684,6 +4840,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP) {
+ drm_object_attach_property(
+ &aconnector->base.base,
+ dm->ddev->mode_config.hdr_output_metadata_property, 0);
+
drm_connector_attach_vrr_capable_property(
&aconnector->base);
}
@@ -4952,12 +5112,12 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
int x, y;
int xorigin = 0, yorigin = 0;
- if (!crtc || !plane->state->fb) {
- position->enable = false;
- position->x = 0;
- position->y = 0;
+ position->enable = false;
+ position->x = 0;
+ position->y = 0;
+
+ if (!crtc || !plane->state->fb)
return 0;
- }
if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
(plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
@@ -4971,6 +5131,10 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
x = plane->state->crtc_x;
y = plane->state->crtc_y;
+ if (x <= -amdgpu_crtc->max_cursor_width ||
+ y <= -amdgpu_crtc->max_cursor_height)
+ return 0;
+
if (crtc->primary->state) {
/* avivo cursor are offset into the total surface */
x += crtc->primary->state->src_x >> 16;
@@ -5114,6 +5278,11 @@ static void update_freesync_state_on_stream(
amdgpu_dm_vrr_active(new_crtc_state)) {
mod_freesync_handle_v_update(dm->freesync_module,
new_stream, &vrr_params);
+
+ /* Need to call this before the frame ends. */
+ dc_stream_adjust_vmin_vmax(dm->dc,
+ new_crtc_state->stream,
+ &vrr_params.adjust);
}
}
@@ -5452,11 +5621,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
}
if (acrtc_state->stream) {
-
- if (acrtc_state->freesync_timing_changed)
- bundle->stream_update.adjust =
- &acrtc_state->stream->adjust;
-
if (acrtc_state->freesync_vrr_info_changed)
bundle->stream_update.vrr_infopacket =
&acrtc_state->stream->vrr_infopacket;
@@ -5477,6 +5641,20 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
bundle->stream_update.abm_level = &acrtc_state->abm_level;
+ /*
+ * If FreeSync state on the stream has changed then we need to
+ * re-adjust the min/max bounds now that DC doesn't handle this
+ * as part of commit.
+ */
+ if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
+ amdgpu_dm_vrr_active(acrtc_state)) {
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ dc_stream_adjust_vmin_vmax(
+ dm->dc, acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
bundle->surface_updates,
@@ -5768,7 +5946,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct dc_surface_update dummy_updates[MAX_SURFACES];
struct dc_stream_update stream_update;
+ struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
+ bool abm_changed, hdr_changed, scaling_changed;
memset(&dummy_updates, 0, sizeof(dummy_updates));
memset(&stream_update, 0, sizeof(stream_update));
@@ -5785,11 +5965,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
- (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
+ scaling_changed = is_scaling_state_different(dm_new_con_state,
+ dm_old_con_state);
+
+ abm_changed = dm_new_crtc_state->abm_level !=
+ dm_old_crtc_state->abm_level;
+
+ hdr_changed =
+ is_hdr_metadata_different(old_con_state, new_con_state);
+
+ if (!scaling_changed && !abm_changed && !hdr_changed)
continue;
- if (is_scaling_state_different(dm_new_con_state, dm_old_con_state)) {
+ if (scaling_changed) {
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
@@ -5797,12 +5985,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
stream_update.dst = dm_new_crtc_state->stream->dst;
}
- if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
+ if (abm_changed) {
dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
stream_update.abm_level = &dm_new_crtc_state->abm_level;
}
+ if (hdr_changed) {
+ fill_hdr_info_packet(new_con_state, &hdr_packet);
+ stream_update.hdr_static_metadata = &hdr_packet;
+ }
+
status = dc_stream_get_status(dm_new_crtc_state->stream);
WARN_ON(!status);
WARN_ON(!status->plane_count);
@@ -6148,6 +6341,11 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+ ret = fill_hdr_info_packet(drm_new_conn_state,
+ &new_stream->hdr_static_metadata);
+ if (ret)
+ goto fail;
+
if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
new_crtc_state->mode_changed = false;
@@ -6327,6 +6525,10 @@ static bool should_reset_plane(struct drm_atomic_state *state,
if (!new_crtc_state)
return true;
+ /* CRTC Degamma changes currently require us to recreate planes. */
+ if (new_crtc_state->color_mgmt_changed)
+ return true;
+
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
return true;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 978ff14a7d45..811253d7f157 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -26,8 +26,11 @@
#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_plane.h>
/*
* This file contains the definition for amdgpu_display_manager
@@ -304,7 +307,6 @@ struct dm_connector_state {
enum amdgpu_rmx_type scaling;
uint8_t underscan_vborder;
uint8_t underscan_hborder;
- uint8_t max_bpc;
bool underscan_enable;
bool freesync_capable;
uint8_t abm_level;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index a10e3a50d9ef..bc67e6502733 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -24,6 +24,7 @@
*/
#include <drm/drm_crtc.h>
+#include <drm/drm_vblank.h>
#include "amdgpu.h"
#include "amdgpu_dm.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 1d5fc5ad3bee..e611b5376d8c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -23,7 +23,9 @@
*
*/
-#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_debugfs.h>
#include "dc.h"
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index e6cd67342df8..97b2c3b16bef 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -28,7 +28,6 @@
#include <linux/version.h>
#include <linux/i2c.h>
-#include <drm/drmP.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index fd22b4474dbf..1b59d3d42f7b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -23,8 +23,6 @@
*
*/
-#include <drm/drmP.h>
-
#include "dm_services_types.h"
#include "dc.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 350e7a620d45..b37e8c9653e1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -24,7 +24,6 @@
#include <linux/string.h>
#include <linux/acpi.h>
-#include <drm/drmP.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index d915e8c8769b..022da5d45d4d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -26,7 +26,6 @@
#include <linux/string.h>
#include <linux/acpi.h>
-#include <drm/drmP.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index b8ddb4acccdb..6da4e4f844b2 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -23,7 +23,7 @@
# Makefile for Display Core (dc) component.
#
-DC_LIBS = basics bios calcs dce gpio irq virtual
+DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual
ifdef CONFIG_DRM_AMD_DC_DCN1_0
DC_LIBS += dcn10 dml
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index d28e9cf0e961..8f93d25f91ee 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "include/vector.h"
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index a4c97d32e751..461eef1de124 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index fd5266a58297..fecd766ece37 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "ObjectID.h"
@@ -1313,6 +1315,8 @@ static enum bp_result bios_parser_get_encoder_cap_info(
ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
info->HDMI_6GB_EN = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
+ info->DP_IS_USB_C = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0;
return BP_RESULT_OK;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 8196f3bb10c7..53deba42007a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -57,11 +57,6 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
-#endif
-
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
case DCN_VERSION_1_01:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index f3aa7b53d2aa..7108d51a9c5b 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dce_calcs.h"
#include "dc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h
index 03f06f682ead..ce35de79a6c7 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h
@@ -26,6 +26,7 @@
#ifndef _DCN_CALC_AUTO_H_
#define _DCN_CALC_AUTO_H_
+#include "dc.h"
#include "dcn_calcs.h"
void scaler_settings_calculation(struct dcn_bw_internal_vars *v);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 1b4b51657f5e..5c1e0adb142b 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -24,11 +24,10 @@
*/
#include "dm_services.h"
+#include "dc.h"
#include "dcn_calcs.h"
#include "dcn_calc_auto.h"
-#include "dc.h"
#include "dal_asic_id.h"
-
#include "resource.h"
#include "dcn10/dcn10_resource.h"
#include "dcn10/dcn10_hubbub.h"
@@ -712,7 +711,7 @@ bool dcn_validate_bandwidth(
const struct resource_pool *pool = dc->res_pool;
struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
- int i, input_idx;
+ int i, input_idx, k;
int vesa_sync_start, asic_blank_end, asic_blank_start;
bool bw_limit_pass;
float bw_limit;
@@ -873,8 +872,19 @@ bool dcn_validate_bandwidth(
v->lb_bit_per_pixel[input_idx] = 30;
v->viewport_width[input_idx] = pipe->stream->timing.h_addressable;
v->viewport_height[input_idx] = pipe->stream->timing.v_addressable;
- v->scaler_rec_out_width[input_idx] = pipe->stream->timing.h_addressable;
- v->scaler_recout_height[input_idx] = pipe->stream->timing.v_addressable;
+ /*
+ * For cases where we have no plane, validate up to a 1080p
+ * source size, because here we are only interested in whether
+ * the output timing is supported. If we cannot support the
+ * native resolution of a high-res display, we still want to
+ * support lower resolutions upscaled to native.
+ */
+ if (v->viewport_width[input_idx] > 1920)
+ v->viewport_width[input_idx] = 1920;
+ if (v->viewport_height[input_idx] > 1080)
+ v->viewport_height[input_idx] = 1080;
+ v->scaler_rec_out_width[input_idx] = v->viewport_width[input_idx];
+ v->scaler_recout_height[input_idx] = v->viewport_height[input_idx];
v->override_hta_ps[input_idx] = 1;
v->override_vta_ps[input_idx] = 1;
v->override_hta_pschroma[input_idx] = 1;
@@ -1023,6 +1033,43 @@ bool dcn_validate_bandwidth(
mode_support_and_system_configuration(v);
}
+ display_pipe_configuration(v);
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_scan[k] == dcn_bw_hor)
+ v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k];
+ else
+ v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k];
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->byte_per_pixel_dety[k] = 8.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ } else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+ v->byte_per_pixel_dety[k] = 4.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ } else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->byte_per_pixel_dety[k] = 2.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ } else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->byte_per_pixel_dety[k] = 1.0;
+ v->byte_per_pixel_detc[k] = 2.0;
+ } else {
+ v->byte_per_pixel_dety[k] = 4.0f / 3.0f;
+ v->byte_per_pixel_detc[k] = 8.0f / 3.0f;
+ }
+ }
+
+ v->total_data_read_bandwidth = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *
+ dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k];
+ v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *
+ dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0;
+ v->total_data_read_bandwidth = v->total_data_read_bandwidth +
+ v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k];
+ }
+
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
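
The new loop computes per-plane DET read bandwidth as swath width × DPPs per plane × ceil(bytes per pixel) / line time × vertical scaling ratio. A standalone arithmetic check of that formula, with illustrative (not patch-derived) values and the assumption that pixel_clock is in MHz, so htotal / pixel_clock is a line time in microseconds:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            /* Illustrative 4K ARGB8888 plane, 1:1 scaling, one DPP. */
            double swath_width_y = 3840.0;
            double dpp_per_plane = 1.0;
            double byte_per_pixel_dety = 4.0;  /* dcn_bw_rgb_sub_32 */
            double htotal = 4400.0;
            double pixel_clock = 594.0;        /* MHz, CTA 4K@60 */
            double v_ratio = 1.0;

            double line_time_us = htotal / pixel_clock;
            double bw = swath_width_y * dpp_per_plane *
                        ceil(byte_per_pixel_dety) / line_time_us * v_ratio;

            /* ~2074 bytes/us, i.e. ~2.07 GB/s of luma read bandwidth */
            printf("read bandwidth: %.1f MB/s\n", bw);
            return 0;
    }
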
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
new file mode 100644
index 000000000000..650e2b88c917
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -0,0 +1,75 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'clk_mgr' sub-component of DAL.
+# It provides the control and status of the HW clock managers.
+
+CLK_MGR = clk_mgr.o
+
+AMD_DAL_CLK_MGR = $(addprefix $(AMDDALPATH)/dc/clk_mgr/,$(CLK_MGR))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR)
+
+
+###############################################################################
+# DCE 100 and DCE8x
+###############################################################################
+CLK_MGR_DCE100 = dce_clk_mgr.o
+
+AMD_DAL_CLK_MGR_DCE100 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce100/,$(CLK_MGR_DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE100)
+
+###############################################################################
+# DCE 110
+###############################################################################
+CLK_MGR_DCE110 = dce110_clk_mgr.o
+
+AMD_DAL_CLK_MGR_DCE110 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce110/,$(CLK_MGR_DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE110)
+###############################################################################
+# DCE 112
+###############################################################################
+CLK_MGR_DCE112 = dce112_clk_mgr.o
+
+AMD_DAL_CLK_MGR_DCE112 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce112/,$(CLK_MGR_DCE112))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE112)
+###############################################################################
+# DCE 120
+###############################################################################
+CLK_MGR_DCE120 = dce120_clk_mgr.o
+
+AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120)
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+###############################################################################
+# DCN10
+###############################################################################
+CLK_MGR_DCN10 = rv1_clk_mgr.o rv1_clk_mgr_vbios_smu.o rv2_clk_mgr.o
+
+AMD_DAL_CLK_MGR_DCN10 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn10/,$(CLK_MGR_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN10)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
new file mode 100644
index 000000000000..cb3f6a74d9e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dal_asic_id.h"
+#include "dc_types.h"
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+
+#include "dce100/dce_clk_mgr.h"
+#include "dce110/dce110_clk_mgr.h"
+#include "dce112/dce112_clk_mgr.h"
+#include "dce120/dce120_clk_mgr.h"
+#include "dcn10/rv1_clk_mgr.h"
+#include "dcn10/rv2_clk_mgr.h"
+
+
+int clk_mgr_helper_get_active_display_cnt(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, display_count;
+
+ display_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ /*
+ * Only notify active or virtual streams. Virtual streams
+ * must be counted to work around the headless case, since
+ * HPD does not fire when the system is in S0i2.
+ */
+ if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
+ display_count++;
+ }
+
+ return display_count;
+}
+
+
+struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
+{
+ struct hw_asic_id asic_id = ctx->asic_id;
+
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ switch (asic_id.chip_family) {
+ case FAMILY_CI:
+ case FAMILY_KV:
+ dce_clk_mgr_construct(ctx, clk_mgr);
+ break;
+ case FAMILY_CZ:
+ dce110_clk_mgr_construct(ctx, clk_mgr);
+ break;
+ case FAMILY_VI:
+ if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
+ dce_clk_mgr_construct(ctx, clk_mgr);
+ break;
+ }
+ if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
+ dce112_clk_mgr_construct(ctx, clk_mgr);
+ break;
+ }
+ if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) {
+ dce112_clk_mgr_construct(ctx, clk_mgr);
+ break;
+ }
+ break;
+ case FAMILY_AI:
+ if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
+ dce121_clk_mgr_construct(ctx, clk_mgr);
+ else
+ dce120_clk_mgr_construct(ctx, clk_mgr);
+ break;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case FAMILY_RV:
+ if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
+ rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
+ break;
+ }
+ if (ASICREV_IS_RAVEN(asic_id.hw_internal_rev) ||
+ ASICREV_IS_PICASSO(asic_id.hw_internal_rev)) {
+ rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
+ break;
+ }
+ break;
+#endif /* Family RV */
+
+ default:
+ ASSERT(0); /* Unknown Asic */
+ break;
+ }
+
+ return &clk_mgr->base;
+}
+
+void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ kfree(clk_mgr);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
new file mode 100644
index 000000000000..814450fefffa
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+#include "dce_clk_mgr.h"
+#include "dce110/dce110_clk_mgr.h"
+#include "dce112/dce112_clk_mgr.h"
+#include "reg_helper.h"
+#include "dmcu.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+/*
+ * Currently the register shifts and masks in this file are used for dce100
+ * and dce80, which have identical definitions.
+ * TODO: remove this when DPREFCLK_CNTL and the dpref DENTIST_DISPCLK_CNTL
+ * are moved to dccg, where they belong.
+ */
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#define REG(reg) \
+ (clk_mgr->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
+
+static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+
+/* Max clock values for each state indexed by "enum clocks_state": */
+static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+/* ClocksStateInvalid - should not be used */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateLow */
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+/* ClocksStateNominal */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+/* ClocksStatePerformance */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+
+int dentist_get_divider_from_did(int did)
+{
+ if (did < DENTIST_BASE_DID_1)
+ did = DENTIST_BASE_DID_1;
+ if (did > DENTIST_MAX_DID)
+ did = DENTIST_MAX_DID;
+
+ if (did < DENTIST_BASE_DID_2) {
+ return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+ * (did - DENTIST_BASE_DID_1);
+ } else if (did < DENTIST_BASE_DID_3) {
+ return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+ * (did - DENTIST_BASE_DID_2);
+ } else if (did < DENTIST_BASE_DID_4) {
+ return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+ * (did - DENTIST_BASE_DID_3);
+ } else {
+ return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
+ * (did - DENTIST_BASE_DID_4);
+ }
+}
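
dentist_get_divider_from_did() maps a DID code onto four piecewise-linear ranges whose start and step constants are declared in dce_clk_mgr.h later in this patch; the returned value is the divide ratio scaled by 4 (DENTIST_DIVIDER_RANGE_SCALE_FACTOR). A quick standalone check of the mapping with two illustrative DIDs:

    #include <stdio.h>

    /* Mirrors the range constants declared in dce_clk_mgr.h below. */
    static int divider_from_did(int did)
    {
            if (did < 0x08) did = 0x08;
            if (did > 0x7f) did = 0x7f;
            if (did < 0x40) return 8   + 1 * (did - 0x08); /* step 0.25 */
            if (did < 0x60) return 64  + 2 * (did - 0x40); /* step 0.50 */
            if (did < 0x7e) return 128 + 4 * (did - 0x60); /* step 1.00 */
            return 248 + 264 * (did - 0x7e);               /* step 66.0 */
    }

    int main(void)
    {
            /* DID 0x18 -> 24 (divide by 6.0); DID 0x41 -> 66 (16.5) */
            printf("%d %d\n", divider_from_did(0x18), divider_from_did(0x41));
            return 0;
    }
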
+
+/* SW will adjust the DP REF Clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, in all
+ * of these cases:
+ *
+ * - SS enabled on the DP Ref clock and HW de-spreading enabled, with
+ *   SW calculations for DS_INCR/DS_MODULO (this is planned to be the
+ *   default case)
+ * - SS enabled on the DP Ref clock and HW de-spreading enabled, with
+ *   HW calculations (not planned to be used, but the average clock
+ *   should still be valid)
+ * - SS enabled on the DP Ref clock and HW de-spreading disabled
+ *   (should not be the case with CIK): SW should then program all
+ *   generated rates according to the average value (as on previous
+ *   ASICs)
+ */
+
+int dce_adjust_dp_ref_freq_for_ss(struct clk_mgr_internal *clk_mgr_dce, int dp_ref_clk_khz)
+{
+ if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
+ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+ dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
+ clk_mgr_dce->dprefclk_ss_divider), 200);
+ struct fixed31_32 adj_dp_ref_clk_khz;
+
+ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+ }
+ return dp_ref_clk_khz;
+}
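
dce_adjust_dp_ref_freq_for_ss() scales the nominal clock by (1 - pct / divider / 200): pct/divider is the spread in percent, and the further division by 200 converts percent to a fraction while halving it, since down-spread lowers the average clock by half the spread. Worked through with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
            int pct = 300, divider = 1000; /* 0.3% down-spread, illustrative */
            int dp_ref_khz = 600000;

            /* average = nominal * (1 - spread% / 2) */
            double adj = dp_ref_khz * (1.0 - (double)pct / divider / 200.0);

            printf("%d kHz -> %.0f kHz\n", dp_ref_khz, adj); /* 599100 */
            return 0;
    }
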
+
+int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ int dprefclk_wdivider;
+ int dprefclk_src_sel;
+ int dp_ref_clk_khz = 600000;
+ int target_div;
+
+ /* ASSERT DP Reference Clock source is from DFS */
+ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+ ASSERT(dprefclk_src_sel == 0);
+
+ /* Read mmDENTIST_DISPCLK_CNTL to get the currently
+ * programmed DID DENTIST_DPREFCLK_WDIVIDER */
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+ /* Convert DENTIST_DPREFCLK_WDIVIDER to an actual divider */
+ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+
+ /* Calculate the current DFS clock, in kHz.*/
+ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->dentist_vco_freq_khz) / target_div;
+
+ return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
+}
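
Putting the two helpers together, dce_get_dp_ref_freq_khz() computes dp_ref = DENTIST_DIVIDER_RANGE_SCALE_FACTOR × vco / divider before the spread-spectrum correction. Using the 3.6 GHz VCO fallback from dce_clock_read_integrated_info() below and an illustrative WDIVIDER DID of 0x18 (divider 24, i.e. divide-by-6):

    #include <stdio.h>

    int main(void)
    {
            int vco_khz = 3600000;  /* dentist_vco_freq_khz fallback */
            int divider = 24;       /* DID 0x18, divide-by-6.0 */

            printf("dp_ref = %d kHz\n", 4 * vco_khz / divider); /* 600000 */
            return 0;
    }
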
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return dce_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_base->dprefclk_khz);
+}
+
+/* Unit: kHz. Before a mode set, get the pixel clock from the context;
+ * the ASIC registers may not be programmed yet.
+ */
+uint32_t dce_get_max_pixel_clock_for_all_paths(struct dc_state *context)
+{
+ uint32_t max_pix_clk = 0;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ /* do not check underlay */
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 > max_pix_clk)
+ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
+
+ /* Raise the clock state for HBR3/2 if required. It is confirmed
+ * with HW that DCE/DPCS logic for HBR3 still needs Nominal (0.8V)
+ * on the VDDC rail.
+ */
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
+ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
+ }
+
+ return max_pix_clk;
+}
+
+enum dm_pp_clocks_state dce_get_required_clocks_state(
+ struct clk_mgr *clk_mgr_base,
+ struct dc_state *context)
+{
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ int i;
+ enum dm_pp_clocks_state low_req_clk;
+ int max_pix_clk = dce_get_max_pixel_clock_for_all_paths(context);
+
+ /* Iterate from highest supported to lowest valid state, and update
+ * lowest RequiredState with the lowest state that satisfies
+ * all required clocks
+ */
+ for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+ if (context->bw_ctx.bw.dce.dispclk_khz >
+ clk_mgr_dce->max_clks_by_state[i].display_clk_khz
+ || max_pix_clk >
+ clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
+ break;
+
+ low_req_clk = i + 1;
+ if (low_req_clk > clk_mgr_dce->max_clks_state) {
+ /* set max clock state for high phyclock, invalid on exceeding display clock */
+ if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
+ < context->bw_ctx.bw.dce.dispclk_khz)
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ else
+ low_req_clk = clk_mgr_dce->max_clks_state;
+ }
+
+ return low_req_clk;
+}
+
+
+/* TODO: remove; use the two broken-down functions instead */
+int dce_set_clock(
+ struct clk_mgr *clk_mgr_base,
+ int requested_clk_khz)
+{
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+ struct dc_bios *bp = clk_mgr_base->ctx->dc_bios;
+ int actual_clock = requested_clk_khz;
+ struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_mgr_dce->dentist_vco_freq_khz / 64);
+
+ /* Prepare to program display clock*/
+ pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+ if (clk_mgr_dce->dfs_bypass_active)
+ pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+
+ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+ if (clk_mgr_dce->dfs_bypass_active) {
+ /* Cache the fixed display clock*/
+ clk_mgr_dce->dfs_bypass_disp_clk =
+ pxl_clk_params.dfs_bypass_display_clock;
+ actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+ }
+
+ /* Coming out of power down we need to mark the clock state as
+ * ClocksStateNominal from HWReset, so that on resume we will call
+ * the pplib voltage regulator. */
+ if (requested_clk_khz == 0)
+ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+
+ return actual_clock;
+}
+
+
+static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce)
+{
+ struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
+ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+ struct integrated_info info = { { { 0 } } };
+ struct dc_firmware_info fw_info = { { 0 } };
+ int i;
+
+ if (bp->integrated_info)
+ info = *bp->integrated_info;
+
+ clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+ if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_mgr_dce->dentist_vco_freq_khz =
+ fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+ clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ /* Update the maximum display clock for each power state */
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ switch (i) {
+ case 0:
+ clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+ break;
+
+ case 1:
+ clk_state = DM_PP_CLOCKS_STATE_LOW;
+ break;
+
+ case 2:
+ clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ break;
+
+ case 3:
+ clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+ break;
+
+ default:
+ clk_state = DM_PP_CLOCKS_STATE_INVALID;
+ break;
+ }
+
+ /* Do not allow a bad VBIOS/SBIOS to override with invalid values;
+ * check for > 100 MHz */
+ if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+ clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
+ info.disp_clk_voltage[i].max_supported_clk;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_mgr_dce->dfs_bypass_enabled = true;
+}
+
+void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
+{
+ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+ int ss_info_num = bp->funcs->get_ss_entry_number(
+ bp, AS_SIGNAL_TYPE_GPU_PLL);
+
+ if (ss_info_num) {
+ struct spread_spectrum_info info = { { 0 } };
+ enum bp_result result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+
+ /* VBIOS keeps an entry for GPU PLL SS even if SS is not
+ * enabled; in that case SSInfo.spreadSpectrumPercentage != 0
+ * is the sign that SS is enabled.
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_mgr_dce->ss_on_dprefclk = true;
+ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* TODO: Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_mgr_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+
+ return;
+ }
+
+ result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+
+ /* VBIOS keeps an entry for DPREFCLK SS even if SS is not
+ * enabled; in that case SSInfo.spreadSpectrumPercentage != 0
+ * is the sign that SS is enabled.
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_mgr_dce->ss_on_dprefclk = true;
+ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_mgr_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+ }
+ }
+}
+
+static void dce_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dm_pp_power_level_change_request level_change_req;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
+
+ /*TODO: W/A for dal3 linux, investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ patched_disp_clk = patched_disp_clk * 115 / 100;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
+ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
+ patched_disp_clk = dce_set_clock(clk_mgr_base, patched_disp_clk);
+ clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
+ }
+ dce_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
+}
+
+static struct clk_mgr_funcs dce_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .update_clocks = dce_update_clocks
+};
+
+void dce_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr)
+{
+ struct clk_mgr *base = &clk_mgr->base;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ memcpy(clk_mgr->max_clks_by_state,
+ dce80_max_clks_by_state,
+ sizeof(dce80_max_clks_by_state));
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ clk_mgr->regs = &disp_clk_regs;
+ clk_mgr->clk_mgr_shift = &disp_clk_shift;
+ clk_mgr->clk_mgr_mask = &disp_clk_mask;
+ clk_mgr->dfs_bypass_disp_clk = 0;
+
+ clk_mgr->dprefclk_ss_percentage = 0;
+ clk_mgr->dprefclk_ss_divider = 1000;
+ clk_mgr->ss_on_dprefclk = false;
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ clk_mgr->max_clks_state = static_clk_info.max_clocks_state;
+ else
+ clk_mgr->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ dce_clock_read_integrated_info(clk_mgr);
+ dce_clock_read_ss_info(clk_mgr);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.h
new file mode 100644
index 000000000000..f3bc7ab68aab
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_CLK_MGR_H_
+#define _DCE_CLK_MGR_H_
+
+#include "dc.h"
+
+/* Starting DID for each range */
+enum dentist_base_divider_id {
+ DENTIST_BASE_DID_1 = 0x08,
+ DENTIST_BASE_DID_2 = 0x40,
+ DENTIST_BASE_DID_3 = 0x60,
+ DENTIST_BASE_DID_4 = 0x7e,
+ DENTIST_MAX_DID = 0x7f
+};
+
+/* Starting point and step size for each divider range.*/
+enum dentist_divider_range {
+ DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
+ DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
+ DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
+ DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
+ DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+ DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
+ DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
+ DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
+ DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
+};
+
+/* functions shared by other dce clk mgrs */
+int dce_adjust_dp_ref_freq_for_ss(struct clk_mgr_internal *clk_mgr_dce, int dp_ref_clk_khz);
+int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base);
+enum dm_pp_clocks_state dce_get_required_clocks_state(
+ struct clk_mgr *clk_mgr_base,
+ struct dc_state *context);
+
+uint32_t dce_get_max_pixel_clock_for_all_paths(struct dc_state *context);
+
+
+void dce_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr_dce);
+
+void dce_clock_read_ss_info(struct clk_mgr_internal *dccg_dce);
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg);
+
+int dce_set_clock(
+ struct clk_mgr *clk_mgr_base,
+ int requested_clk_khz);
+
+
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
+
+int dentist_get_divider_from_did(int did);
+
+#endif /* _DCE_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
new file mode 100644
index 000000000000..c1a92c16535c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "clk_mgr_internal.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce110_clk_mgr.h"
+#include "../clk_mgr/dce100/dce_clk_mgr.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+
+static int determine_sclk_from_bounding_box(
+ const struct dc *dc,
+ int required_sclk)
+{
+ int i;
+
+ /*
+ * Some asics do not give us sclk levels, so we just report the actual
+ * required sclk
+ */
+ if (dc->sclk_lvls.num_levels == 0)
+ return required_sclk;
+
+ for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+ if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+ return dc->sclk_lvls.clocks_in_khz[i];
+ }
+ /*
+ * Even the maximum level could not satisfy the requirement; this
+ * is unexpected at this stage and should have been caught at
+ * validation time.
+ */
+ ASSERT(0);
+ return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+}
+
+uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+{
+ uint8_t j;
+ uint32_t min_vertical_blank_time = -1;
+
+ for (j = 0; j < context->stream_count; j++) {
+ struct dc_stream_state *stream = context->streams[j];
+ uint32_t vertical_blank_in_pixels = 0;
+ uint32_t vertical_blank_time = 0;
+
+ vertical_blank_in_pixels = stream->timing.h_total *
+ (stream->timing.v_total
+ - stream->timing.v_addressable);
+
+ vertical_blank_time = vertical_blank_in_pixels
+ * 10000 / stream->timing.pix_clk_100hz;
+
+ if (min_vertical_blank_time > vertical_blank_time)
+ min_vertical_blank_time = vertical_blank_time;
+ }
+
+ return min_vertical_blank_time;
+}
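
dce110_get_min_vblank_time_us() converts the blanking region to microseconds as vblank_pixels × 10000 / pix_clk_100hz; the factor 10000 folds the 100 Hz clock units into µs. Checked against a standard CTA 1080p60 timing (illustrative values):

    #include <stdio.h>

    int main(void)
    {
            unsigned h_total = 2200, v_total = 1125, v_addressable = 1080;
            unsigned pix_clk_100hz = 1485000; /* 148.5 MHz in 100 Hz units */

            unsigned vblank_pixels = h_total * (v_total - v_addressable);
            unsigned vblank_us = vblank_pixels * 10000 / pix_clk_100hz;

            printf("%u us of vertical blank\n", vblank_us); /* 666 */
            return 0;
    }
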
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg)
+{
+ int j;
+ int num_cfgs = 0;
+
+ for (j = 0; j < context->stream_count; j++) {
+ int k;
+
+ const struct dc_stream_state *stream = context->streams[j];
+ struct dm_pp_single_disp_config *cfg =
+ &pp_display_cfg->disp_configs[num_cfgs];
+ const struct pipe_ctx *pipe_ctx = NULL;
+
+ for (k = 0; k < MAX_PIPES; k++)
+ if (stream == context->res_ctx.pipe_ctx[k].stream) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[k];
+ break;
+ }
+
+ ASSERT(pipe_ctx != NULL);
+
+ /* only notify active stream */
+ if (stream->dpms_off)
+ continue;
+
+ num_cfgs++;
+ cfg->signal = pipe_ctx->stream->signal;
+ cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
+ cfg->src_height = stream->src.height;
+ cfg->src_width = stream->src.width;
+ cfg->ddi_channel_mapping =
+ stream->link->ddi_channel_mapping.raw;
+ cfg->transmitter =
+ stream->link->link_enc->transmitter;
+ cfg->link_settings.lane_count =
+ stream->link->cur_link_settings.lane_count;
+ cfg->link_settings.link_rate =
+ stream->link->cur_link_settings.link_rate;
+ cfg->link_settings.link_spread =
+ stream->link->cur_link_settings.link_spread;
+ cfg->sym_clock = stream->phy_pix_clk;
+ /* Round v_refresh */
+ cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
+ cfg->v_refresh /= stream->timing.h_total;
+ cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+ / stream->timing.v_total;
+ }
+
+ pp_display_cfg->display_count = num_cfgs;
+}
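
The v_refresh computation above first derives the line rate (pixel clock divided by h_total), then rounds to the nearest frame rate with the usual (x + d/2) / d integer trick. With the same illustrative 1080p60 timing:

    #include <stdio.h>

    int main(void)
    {
            unsigned pix_clk_100hz = 1485000, h_total = 2200, v_total = 1125;

            unsigned v_refresh = pix_clk_100hz * 100;        /* Hz */
            v_refresh /= h_total;                            /* 67500 lines/s */
            v_refresh = (v_refresh + v_total / 2) / v_total; /* -> 60 */

            printf("v_refresh = %u Hz\n", v_refresh);
            return 0;
    }
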
+
+void dce11_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->all_displays_in_sync =
+ context->bw_ctx.bw.dce.all_displays_in_sync;
+ pp_display_cfg->nb_pstate_switch_disable =
+ context->bw_ctx.bw.dce.nbp_state_change_enable == false;
+ pp_display_cfg->cpu_cc6_disable =
+ context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_disable =
+ context->bw_ctx.bw.dce.cpup_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_separation_time =
+ context->bw_ctx.bw.dce.blackout_recovery_time_us;
+
+ pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
+ / MEMORY_TYPE_MULTIPLIER_CZ;
+
+ pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+ dc,
+ context->bw_ctx.bw.dce.sclk_khz);
+
+ /*
+ * As a workaround for >4x4K light-up, set dcfclk to the
+ * min_engine_clock value. This is not required for fewer than
+ * five displays, so don't request dcfclk in dc, to avoid
+ * impacting power saving.
+ */
+ pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ?
+ pp_display_cfg->min_engine_clock_khz : 0;
+
+ pp_display_cfg->min_engine_clock_deep_sleep_khz
+ = context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
+
+ pp_display_cfg->avail_mclk_switch_time_us =
+ dce110_get_min_vblank_time_us(context);
+ /* TODO: dce11.2*/
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+ pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ /* TODO: is this still applicable?*/
+ if (pp_display_cfg->display_count == 1) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index =
+ pp_display_cfg->disp_configs[0].pipe_idx;
+ pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
+ }
+
+ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce11_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dm_pp_power_level_change_request level_change_req;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
+
+ /*TODO: W/A for dal3 linux, investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ patched_disp_clk = patched_disp_clk * 115 / 100;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
+ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
+ context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr_base, patched_disp_clk);
+ clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
+}
+
+static struct clk_mgr_funcs dce110_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .update_clocks = dce11_update_clocks
+};
+
+void dce110_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr)
+{
+ memcpy(clk_mgr->max_clks_by_state,
+ dce110_max_clks_by_state,
+ sizeof(dce110_max_clks_by_state));
+
+ dce_clk_mgr_construct(ctx, clk_mgr);
+
+ clk_mgr->regs = &disp_clk_regs;
+ clk_mgr->clk_mgr_shift = &disp_clk_shift;
+ clk_mgr->clk_mgr_mask = &disp_clk_mask;
+ clk_mgr->base.funcs = &dce110_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.h
new file mode 100644
index 000000000000..c0eb2ea6fb3a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCE_DCE110_CLK_MGR_H_
+#define DAL_DC_DCE_DCE110_CLK_MGR_H_
+
+void dce110_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr);
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
+/* functions shared with other clk mgr*/
+void dce11_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context);
+
+uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+
+#endif /* DAL_DC_DCE_DCE110_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
new file mode 100644
index 000000000000..778392c73187
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "clk_mgr_internal.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+#include "dce100/dce_clk_mgr.h"
+#include "dce110/dce110_clk_mgr.h"
+#include "dce112_clk_mgr.h"
+#include "dal_asic_id.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+
+
+//TODO: remove; use the two broken-down functions instead
+int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
+{
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk_mgr_base->ctx->dc_bios;
+ struct dc *core_dc = clk_mgr_base->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clock = requested_clk_khz;
+ /* Prepare to program display clock*/
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_mgr_dce->dentist_vco_freq_khz / 62);
+
+ dce_clk_params.target_clock_frequency = requested_clk_khz;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+ actual_clock = dce_clk_params.target_clock_frequency;
+
+ /*
+ * Coming out of power down / HW reset, we need to mark the clock state
+ * as ClocksStateNominal so that on resume we will call the pplib voltage regulator.
+ */
+ if (requested_clk_khz == 0)
+ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ /* Program the DP ref clock */
+ /* VBIOS will determine the DPREFCLK frequency, so we don't set it */
+ dce_clk_params.target_clock_frequency = 0;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+ if (!ASICREV_IS_VEGA20_P(clk_mgr_base->ctx->asic_id.hw_internal_rev))
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+ (dce_clk_params.pll_id ==
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+ else
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
+ }
+ }
+
+ clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
+ return actual_clock;
+}
+
+int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
+{
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
+ struct dc *core_dc = clk_mgr->base.ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clock = requested_clk_khz;
+ /* Prepare to program the display clock */
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+ /* Make sure the requested clock isn't lower than the minimum threshold */
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_mgr->dentist_vco_freq_khz / 62);
+
+ dce_clk_params.target_clock_frequency = requested_clk_khz;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+ actual_clock = dce_clk_params.target_clock_frequency;
+
+ /*
+ * Coming out of power down / HW reset, we need to mark the clock state
+ * as ClocksStateNominal so that on resume we will call the pplib voltage regulator.
+ */
+ if (requested_clk_khz == 0)
+ clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
+ }
+ }
+
+ clk_mgr->dfs_bypass_disp_clk = actual_clock;
+ return actual_clock;
+}
+
+int dce112_set_dprefclk(struct clk_mgr_internal *clk_mgr)
+{
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
+
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+ /* Program the DP ref clock */
+ /* VBIOS will determine the DPREFCLK frequency, so we don't set it */
+ dce_clk_params.target_clock_frequency = 0;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+ if (!ASICREV_IS_VEGA20_P(clk_mgr->base.ctx->asic_id.hw_internal_rev))
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+ (dce_clk_params.pll_id ==
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+ else
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+ /* Returns the dp_refclk that was set */
+ return dce_clk_params.target_clock_frequency;
+}
+
+static void dce112_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dm_pp_power_level_change_request level_change_req;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
+
+ /* TODO: workaround for dal3 Linux; investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ patched_disp_clk = patched_disp_clk * 115 / 100;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
+ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
+ patched_disp_clk = dce112_set_clock(clk_mgr_base, patched_disp_clk);
+ clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
+}
+
+static struct clk_mgr_funcs dce112_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .update_clocks = dce112_update_clocks
+};
+
+void dce112_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr)
+{
+ memcpy(clk_mgr->max_clks_by_state,
+ dce112_max_clks_by_state,
+ sizeof(dce112_max_clks_by_state));
+
+ dce_clk_mgr_construct(ctx, clk_mgr);
+
+ clk_mgr->regs = &disp_clk_regs;
+ clk_mgr->clk_mgr_shift = &disp_clk_shift;
+ clk_mgr->clk_mgr_mask = &disp_clk_mask;
+ clk_mgr->base.funcs = &dce112_funcs;
+}
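
For reference, the nonzero-request clamp in dce112_set_clock()/dce112_set_dispclk() bounds requests by the lowest frequency the DENTIST DFS can generate, roughly dentist_vco_freq_khz / 62. A minimal sketch of that bound follows; clamp_to_dfs_minimum is an illustrative name, not a driver symbol.

	/* Hedged sketch: the lowest programmable DFS output is roughly VCO/62. */
	static int clamp_to_dfs_minimum(int requested_khz, int dentist_vco_khz)
	{
		int min_khz = dentist_vco_khz / 62;

		/* 0 means "power down" and is passed through untouched. */
		if (requested_khz > 0 && requested_khz < min_khz)
			return min_khz;
		return requested_khz;
	}
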
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.h
new file mode 100644
index 000000000000..dfb06db118e1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCE_DCE112_CLK_MGR_H_
+#define DAL_DC_DCE_DCE112_CLK_MGR_H_
+
+
+void dce112_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr);
+
+/* functions shared with other clk mgr */
+int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz);
+int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz);
+int dce112_set_dprefclk(struct clk_mgr_internal *clk_mgr);
+
+#endif /* DAL_DC_DCE_DCE112_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
new file mode 100644
index 000000000000..08f2e253ccb0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "clk_mgr_internal.h"
+
+#include "dce112/dce112_clk_mgr.h"
+#include "dce110/dce110_clk_mgr.h"
+#include "dce120_clk_mgr.h"
+#include "dce100/dce_clk_mgr.h"
+
+static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - per HW design team, currently not supposed to be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+
+/**
+ * dce121_clock_patch_xgmi_ss_info() - Save XGMI spread spectrum info
+ * @clk_mgr_base: clock manager base structure
+ *
+ * Reads the XGMI spread spectrum info from VBIOS and saves it within the
+ * DCE clock manager. This overwrites the existing dprefclk SS values if
+ * the VBIOS query succeeds; otherwise it does nothing. It also sets the
+ * ->xgmi_enabled flag.
+ */
+void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ enum bp_result result;
+ struct spread_spectrum_info info = { { 0 } };
+ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+
+ clk_mgr_dce->xgmi_enabled = false;
+
+ result = bp->funcs->get_spread_spectrum_info(bp, AS_SIGNAL_TYPE_XGMI,
+ 0, &info);
+ if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) {
+ clk_mgr_dce->xgmi_enabled = true;
+ clk_mgr_dce->ss_on_dprefclk = true;
+ clk_mgr_dce->dprefclk_ss_divider =
+ info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /*
+ * Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread
+ */
+ clk_mgr_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+ }
+}
+
+static void dce12_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ int max_pix_clk = dce_get_max_pixel_clock_for_all_paths(context);
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
+
+ /* TODO: workaround for dal3 Linux; investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ patched_disp_clk = patched_disp_clk * 115 / 100;
+
+ if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ /*
+ * When xGMI is enabled, the display clk needs to be adjusted
+ * with the WAFL link's SS percentage.
+ */
+ if (clk_mgr_dce->xgmi_enabled)
+ patched_disp_clk = dce_adjust_dp_ref_freq_for_ss(
+ clk_mgr_dce, patched_disp_clk);
+ clock_voltage_req.clocks_in_khz = patched_disp_clk;
+ clk_mgr_base->clks.dispclk_khz = dce112_set_clock(clk_mgr_base, patched_disp_clk);
+
+ dm_pp_apply_clock_for_voltage_request(clk_mgr_base->ctx, &clock_voltage_req);
+ }
+
+ if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr_base->clks.phyclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = max_pix_clk;
+ clk_mgr_base->clks.phyclk_khz = max_pix_clk;
+
+ dm_pp_apply_clock_for_voltage_request(clk_mgr_base->ctx, &clock_voltage_req);
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
+}
+
+
+static struct clk_mgr_funcs dce120_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dce12_update_clocks
+};
+
+void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr)
+{
+ memcpy(clk_mgr->max_clks_by_state,
+ dce120_max_clks_by_state,
+ sizeof(dce120_max_clks_by_state));
+
+ dce_clk_mgr_construct(ctx, clk_mgr);
+
+ clk_mgr->base.dprefclk_khz = 600000;
+ clk_mgr->base.funcs = &dce120_funcs;
+}
+
+void dce121_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr)
+{
+ dce120_clk_mgr_construct(ctx, clk_mgr);
+ clk_mgr->base.dprefclk_khz = 625000;
+}
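
Both dce112_update_clocks() and dce12_update_clocks() pad the requested display clock by 15% whenever DFS bypass is inactive (the dal3 workaround flagged in the TODOs above). A hedged sketch of that arithmetic, with an illustrative helper name:

	/* Illustrative only: the 115/100 padding applied when DFS bypass is off. */
	static int pad_disp_clk_for_dal3_wa(int disp_clk_khz, bool dfs_bypass_active)
	{
		if (!dfs_bypass_active)
			return disp_clk_khz * 115 / 100; /* e.g. 600000 -> 690000 kHz */
		return disp_clk_khz;
	}
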
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.h
new file mode 100644
index 000000000000..d12d6fcb167d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCE_DCE120_CLK_MGR_H_
+#define DAL_DC_DCE_DCE120_CLK_MGR_H_
+
+void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr);
+void dce121_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr);
+
+#endif /* DAL_DC_DCE_DCE120_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 2b2de1d913c9..04b12bb2243d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -23,32 +23,23 @@
*
*/
-#include "dcn10_clk_mgr.h"
+#include <linux/slab.h>
#include "reg_helper.h"
#include "core_types.h"
-
-#define TO_DCE_CLK_MGR(clocks)\
- container_of(clocks, struct dce_clk_mgr, base)
-
-#define REG(reg) \
- (clk_mgr_dce->regs->reg)
-
-#undef FN
-#define FN(reg_name, field_name) \
- clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
-
-#define CTX \
- clk_mgr_dce->base.ctx
-#define DC_LOGGER \
- clk_mgr->ctx->logger
-
-static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+#include "clk_mgr_internal.h"
+#include "rv1_clk_mgr.h"
+#include "dce100/dce_clk_mgr.h"
+#include "dce112/dce112_clk_mgr.h"
+#include "rv1_clk_mgr_vbios_smu.h"
+#include "rv1_clk_mgr_clk.h"
+
+static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, struct dc_clocks *new_clocks)
{
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
- bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz;
+ bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->base.clks.dispclk_khz;
int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
- bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz;
+ bool cur_dpp_div = clk_mgr->base.clks.dispclk_khz > clk_mgr->base.clks.dppclk_khz;
/* increase clock, looking for div is 0 for current, request div is 1*/
if (dispclk_increase) {
@@ -78,7 +69,7 @@ static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_cl
/* current disp clk is lower than current maximum dpp clk,
* no need to ramp
*/
- if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold)
+ if (clk_mgr->base.clks.dispclk_khz <= disp_clk_threshold)
return new_clocks->dispclk_khz;
/* request dpp clk need to be divided by 2 */
@@ -89,15 +80,17 @@ static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_cl
return disp_clk_threshold;
}
-static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
{
- struct dc *dc = clk_mgr->ctx->dc;
- int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
int i;
+ int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
/* set disp clk to dpp clk threshold */
- dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
+
+ clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
+ clk_mgr->funcs->set_dprefclk(clk_mgr);
+
/* update request dpp clk division option */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -113,42 +106,23 @@ static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clo
}
/* If target clk not same as dppclk threshold, set to target clock */
- if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
- dce112_set_clock(clk_mgr, new_clocks->dispclk_khz);
-
- clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
- clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
- clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
-}
-
-static int get_active_display_cnt(
- struct dc *dc,
- struct dc_state *context)
-{
- int i, display_count;
-
- display_count = 0;
- for (i = 0; i < context->stream_count; i++) {
- const struct dc_stream_state *stream = context->streams[i];
-
- /*
- * Only notify active stream or virtual stream.
- * Need to notify virtual stream to work around
- * headless case. HPD does not fire when system is in
- * S0i2.
- */
- if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
- display_count++;
+ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) {
+ clk_mgr->funcs->set_dispclk(clk_mgr, new_clocks->dispclk_khz);
+ clk_mgr->funcs->set_dprefclk(clk_mgr);
}
- return display_count;
+
+ clk_mgr->base.clks.dispclk_khz = new_clocks->dispclk_khz;
+ clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
+ clk_mgr->base.clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
}
-static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
+static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
- struct dc *dc = clk_mgr->ctx->dc;
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc *dc = clk_mgr_base->ctx->dc;
struct dc_debug_options *debug = &dc->debug;
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct pp_smu_funcs_rv *pp_smu = NULL;
@@ -158,9 +132,12 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
bool enter_display_off = false;
- display_count = get_active_display_cnt(dc, context);
- if (dc->res_pool->pp_smu)
- pp_smu = &dc->res_pool->pp_smu->rv_funcs;
+ ASSERT(clk_mgr->pp_smu);
+
+ pp_smu = &clk_mgr->pp_smu->rv_funcs;
+
+ display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+
if (display_count == 0)
enter_display_off = true;
@@ -170,18 +147,18 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
* if function pointer not set up, this message is
* sent as part of pplib_apply_display_requirements.
*/
- if (pp_smu && pp_smu->set_display_count)
+ if (pp_smu->set_display_count)
pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
}
- if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
- || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz
- || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz
- || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz)
+ if (new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz
+ || new_clocks->phyclk_khz > clk_mgr_base->clks.phyclk_khz
+ || new_clocks->fclk_khz > clk_mgr_base->clks.fclk_khz
+ || new_clocks->dcfclk_khz > clk_mgr_base->clks.dcfclk_khz)
send_request_to_increase = true;
- if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
- clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
+ clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
send_request_to_lower = true;
}
@@ -189,20 +166,20 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
if (debug->force_fclk_khz != 0)
new_clocks->fclk_khz = debug->force_fclk_khz;
- if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
- clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
+ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
+ clk_mgr_base->clks.fclk_khz = new_clocks->fclk_khz;
send_request_to_lower = true;
}
//DCF Clock
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
- clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+ clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
send_request_to_lower = true;
}
if (should_set_clock(safe_to_lower,
- new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
- clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
send_request_to_lower = true;
}
@@ -211,10 +188,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
*/
if (send_request_to_increase) {
/*use dcfclk to request voltage*/
- if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
+ if (pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
-
pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
@@ -223,67 +199,67 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
/* dcn1 dppclk is tied to dispclk */
/* program dispclk on = as a w/a for sleep resume clock ramping issues */
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)
- || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
- dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
- clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
+ || new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
+ ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
send_request_to_lower = true;
}
if (!send_request_to_increase && send_request_to_lower) {
/*use dcfclk to request voltage*/
- if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
+ if (pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
-
pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
}
-static const struct clk_mgr_funcs dcn1_funcs = {
+
+static struct clk_mgr_funcs rv1_clk_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
- .update_clocks = dcn1_update_clocks
+ .update_clocks = rv1_update_clocks,
+};
+
+static struct clk_mgr_internal_funcs rv1_clk_internal_funcs = {
+ .set_dispclk = rv1_vbios_smu_set_dispclk,
+ .set_dprefclk = dce112_set_dprefclk
};
-struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
+
+void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu)
{
struct dc_debug_options *debug = &ctx->dc->debug;
struct dc_bios *bp = ctx->dc_bios;
struct dc_firmware_info fw_info = { { 0 } };
- struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
- if (clk_mgr_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- clk_mgr_dce->base.ctx = ctx;
- clk_mgr_dce->base.funcs = &dcn1_funcs;
+ clk_mgr->base.ctx = ctx;
+ clk_mgr->pp_smu = pp_smu;
+ clk_mgr->base.funcs = &rv1_clk_funcs;
+ clk_mgr->funcs = &rv1_clk_internal_funcs;
- clk_mgr_dce->dfs_bypass_disp_clk = 0;
+ clk_mgr->dfs_bypass_disp_clk = 0;
- clk_mgr_dce->dprefclk_ss_percentage = 0;
- clk_mgr_dce->dprefclk_ss_divider = 1000;
- clk_mgr_dce->ss_on_dprefclk = false;
+ clk_mgr->dprefclk_ss_percentage = 0;
+ clk_mgr->dprefclk_ss_divider = 1000;
+ clk_mgr->ss_on_dprefclk = false;
+ clk_mgr->base.dprefclk_khz = 600000;
- clk_mgr_dce->dprefclk_khz = 600000;
if (bp->integrated_info)
- clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
- if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+ clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_mgr->dentist_vco_freq_khz == 0) {
bp->funcs->get_firmware_info(bp, &fw_info);
- clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
- if (clk_mgr_dce->dentist_vco_freq_khz == 0)
- clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+ clk_mgr->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr->dentist_vco_freq_khz == 0)
+ clk_mgr->dentist_vco_freq_khz = 3600000;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
- clk_mgr_dce->dfs_bypass_enabled = true;
-
- dce_clock_read_ss_info(clk_mgr_dce);
+ clk_mgr->dfs_bypass_enabled = true;
- return &clk_mgr_dce->base;
+ dce_clock_read_ss_info(clk_mgr);
}
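
ramp_up_dispclk_with_dpp() above always programs dispclk to the DPP threshold first, then flips the per-pipe divide-by-2 option, and only then lands on the requested clock. A compressed sketch of that ordering; the stub names below are illustrative, not driver symbols:

	static void set_dispclk_stub(int khz) { /* stands in for funcs->set_dispclk */ }
	static void set_dpp_div_stub(bool div_by_2) { /* per-pipe dppclk divider control */ }

	static void ramp_sketch(int threshold_khz, int target_khz, bool request_dpp_div)
	{
		set_dispclk_stub(threshold_khz);   /* 1. park dispclk at the DPP threshold */
		set_dpp_div_stub(request_dpp_div); /* 2. switch the DPP dividers safely */
		if (target_khz != threshold_khz)
			set_dispclk_stub(target_khz); /* 3. land on the final clock */
	}
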
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.h
new file mode 100644
index 000000000000..0807478c8212
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __RV1_CLK_MGR_H__
+#define __RV1_CLK_MGR_H__
+
+void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu);
+
+#endif //__RV1_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c
new file mode 100644
index 000000000000..61dd12198a3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "clk_mgr_internal.h"
+#include "rv1_clk_mgr_clk.h"
+
+#include "ip/Discovery/hwid.h"
+#include "ip/Discovery/v1/ip_offset_1.h"
+#include "ip/CLK/clk_10_0_default.h"
+#include "ip/CLK/clk_10_0_offset.h"
+#include "ip/CLK/clk_10_0_reg.h"
+#include "ip/CLK/clk_10_0_sh_mask.h"
+
+#include "dce100/dce_clk_mgr.h"
+
+#define CLK_BASE_INNER(inst) \
+ CLK_BASE__INST ## inst ## _SEG0
+
+#define CLK_REG(reg_name, block, inst) \
+ CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## _ ## inst ## _ ## reg_name
+
+#define REG(reg_name) \
+ CLK_REG(reg_name, CLK0, 0)
+
+/* Only used by the testing framework */
+void rv1_dump_clk_registers(struct clk_state_registers *regs, struct clk_bypass *bypass, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ regs->CLK0_CLK8_CURRENT_CNT = REG_READ(CLK0_CLK8_CURRENT_CNT) / 10; //dcf clk
+
+ bypass->dcfclk_bypass = REG_READ(CLK0_CLK8_BYPASS_CNTL) & 0x0007;
+ if (bypass->dcfclk_bypass < 0 || bypass->dcfclk_bypass > 4)
+ bypass->dcfclk_bypass = 0;
+
+ regs->CLK0_CLK8_DS_CNTL = REG_READ(CLK0_CLK8_DS_CNTL) / 10; //dcf deep sleep divider
+
+ regs->CLK0_CLK8_ALLOW_DS = REG_READ(CLK0_CLK8_ALLOW_DS); //dcf deep sleep allow
+
+ regs->CLK0_CLK10_CURRENT_CNT = REG_READ(CLK0_CLK10_CURRENT_CNT) / 10; //dpref clk
+
+ bypass->dispclk_pypass = REG_READ(CLK0_CLK10_BYPASS_CNTL) & 0x0007;
+ if (bypass->dispclk_pypass < 0 || bypass->dispclk_pypass > 4)
+ bypass->dispclk_pypass = 0;
+
+ regs->CLK0_CLK11_CURRENT_CNT = REG_READ(CLK0_CLK11_CURRENT_CNT) / 10; //disp clk
+
+ bypass->dprefclk_bypass = REG_READ(CLK0_CLK11_BYPASS_CNTL) & 0x0007;
+ if (bypass->dprefclk_bypass < 0 || bypass->dprefclk_bypass > 4)
+ bypass->dprefclk_bypass = 0;
+}
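
rv1_dump_clk_registers() divides the CURRENT_CNT reads by 10 before storing them and masks the BYPASS_CNTL reads down to a 3-bit select, discarding out-of-range values. A small sketch of that decode; the raw counter unit implied by the divide is an assumption, not something documented here:

	/* Sketch of the decode above; the raw-count scaling is an assumption. */
	static void decode_clk_regs_sketch(unsigned int current_cnt_raw,
					   unsigned int bypass_raw,
					   unsigned int *clk_out,
					   unsigned int *bypass_sel)
	{
		*clk_out = current_cnt_raw / 10; /* scale the raw count as the driver does */
		*bypass_sel = bypass_raw & 0x7;  /* 3-bit bypass source select */
		if (*bypass_sel > 4)             /* selects 5..7 are treated as invalid */
			*bypass_sel = 0;
	}
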
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.h
new file mode 100644
index 000000000000..b68e3452efb9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCN10_RV1_CLK_MGR_CLK_H_
+#define DAL_DC_DCN10_RV1_CLK_MGR_CLK_H_
+
+#endif /* DAL_DC_DCN10_RV1_CLK_MGR_CLK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
new file mode 100644
index 000000000000..1897e91c8ccb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "clk_mgr_internal.h"
+#include "reg_helper.h"
+
+#define MAX_INSTANCE 5
+#define MAX_SEGMENT 5
+
+struct IP_BASE_INSTANCE {
+ unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE {
+ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+
+static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
+
+#define mmMP1_SMN_C2PMSG_91 0x29B
+#define mmMP1_SMN_C2PMSG_83 0x293
+#define mmMP1_SMN_C2PMSG_67 0x283
+#define mmMP1_SMN_C2PMSG_91_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_83_BASE_IDX 0
+#define mmMP1_SMN_C2PMSG_67_BASE_IDX 0
+
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xffffffffL
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xffffffffL
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xffffffffL
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x00000000
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x00000000
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x00000000
+
+#define REG(reg_name) \
+ (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+#define FN(reg_name, field) \
+ FD(reg_name##__##field)
+
+#define VBIOSSMC_MSG_SetDispclkFreq 0x4
+#define VBIOSSMC_MSG_SetDprefclkFreq 0x5
+
+int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int msg_id, unsigned int param)
+{
+ /* First clear response register */
+ REG_WRITE(MP1_SMN_C2PMSG_91, 0);
+
+ /* Set the parameter register for the SMU message; the unit is MHz */
+ REG_WRITE(MP1_SMN_C2PMSG_83, param);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
+
+ REG_WAIT(MP1_SMN_C2PMSG_91, CONTENT, 1, 10, 200000);
+
+ /* Actual dispclk set is returned in the parameter register */
+ return REG_READ(MP1_SMN_C2PMSG_83);
+}
+
+int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
+{
+ int actual_dispclk_set_mhz = -1;
+ struct dc *core_dc = clk_mgr->base.ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ /* The unit of the SMU msg parameter is MHz */
+ actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDispclkFreq,
+ requested_dispclk_khz / 1000);
+
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_dispclk_set_mhz / 7);
+ }
+ }
+
+ return actual_dispclk_set_mhz * 1000;
+}
+
+int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
+{
+ int actual_dprefclk_set_mhz = -1;
+
+ actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
+ clk_mgr,
+ VBIOSSMC_MSG_SetDprefclkFreq,
+ clk_mgr->base.dprefclk_khz / 1000);
+
+ /* TODO: add code for programming the DP DTO; currently this is done by the command table */
+
+ return actual_dprefclk_set_mhz * 1000;
+}
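
The mailbox sequence in rv1_vbios_smu_send_msg_with_param() is: clear the response register, stage the MHz parameter, write the message ID to trigger the SMU, wait for the response, then read the actual value back from the parameter register. A self-contained toy model of that handshake; the register indices and the instant-completion stub are illustrative:

	#define RESP_REG  0 /* models MP1_SMN_C2PMSG_91 */
	#define PARAM_REG 1 /* models MP1_SMN_C2PMSG_83 */
	#define MSG_REG   2 /* models MP1_SMN_C2PMSG_67 */

	static unsigned int mbox[3];

	static void mbox_write(int reg, unsigned int val)
	{
		mbox[reg] = val;
		if (reg == MSG_REG)         /* toy SMU: complete the request instantly */
			mbox[RESP_REG] = 1;
	}

	static unsigned int smu_send_msg_sketch(unsigned int msg, unsigned int param_mhz)
	{
		mbox_write(RESP_REG, 0);          /* 1. clear any stale response */
		mbox_write(PARAM_REG, param_mhz); /* 2. stage the parameter (MHz) */
		mbox_write(MSG_REG, msg);         /* 3. trigger the transaction */
		while (mbox[RESP_REG] != 1)       /* 4. real code uses REG_WAIT with a timeout */
			;
		return mbox[PARAM_REG];           /* 5. SMU writes the actual value back */
	}
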
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
new file mode 100644
index 000000000000..083cb3158859
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_
+#define DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_
+
+int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
+int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
+
+#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv2_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv2_clk_mgr.c
new file mode 100644
index 000000000000..b9ba6dbc2b46
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv2_clk_mgr.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "clk_mgr_internal.h"
+#include "rv1_clk_mgr.h"
+#include "rv2_clk_mgr.h"
+#include "dce112/dce112_clk_mgr.h"
+
+static struct clk_mgr_internal_funcs rv2_clk_internal_funcs = {
+ .set_dispclk = dce112_set_dispclk,
+ .set_dprefclk = dce112_set_dprefclk
+};
+
+void rv2_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu)
+{
+ rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
+
+ clk_mgr->funcs = &rv2_clk_internal_funcs;
+}
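
rv2 reuses all of rv1's construction and only swaps the internal set_dispclk path: rv1 goes through the VBIOS-SMU mailbox, rv2 through the dce112 BIOS command-table route. A sketch of that one-pointer specialization; the struct shape below is illustrative, not the driver's:

	struct clk_funcs_sketch {
		int (*set_dispclk)(int khz);
	};

	static int dispclk_via_smu(int khz)  { return khz; } /* rv1-style path */
	static int dispclk_via_bios(int khz) { return khz; } /* rv2-style path */

	static const struct clk_funcs_sketch rv1_funcs_sketch = { dispclk_via_smu };
	static const struct clk_funcs_sketch rv2_funcs_sketch = { dispclk_via_bios };
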
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv2_clk_mgr.h
index 97007cf33665..0c1f26ca563b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv2_clk_mgr.h
@@ -23,17 +23,10 @@
*
*/
-#ifndef __DCN10_CLK_MGR_H__
-#define __DCN10_CLK_MGR_H__
+#ifndef __RV2_CLK_MGR_H__
+#define __RV2_CLK_MGR_H__
-#include "../dce/dce_clk_mgr.h"
+void rv2_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu);
-struct clk_bypass {
- uint32_t dcfclk_bypass;
- uint32_t dispclk_pypass;
- uint32_t dprefclk_bypass;
-};
-
-struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
#endif //__DCN10_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 18c775a950cc..ed466087c8b5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -22,6 +22,8 @@
* Authors: AMD
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dc.h"
@@ -33,6 +35,7 @@
#include "resource.h"
+#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"
@@ -169,9 +172,14 @@ static bool create_links(
link = link_create(&link_init_params);
if (link) {
- dc->links[dc->link_count] = link;
- link->dc = dc;
- ++dc->link_count;
+ if (dc->config.edp_not_connected &&
+ link->connector_signal == SIGNAL_TYPE_EDP) {
+ link_destroy(&link);
+ } else {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
}
}
@@ -257,7 +265,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+ if (pipe->stream == stream && pipe->stream_res.tg) {
pipe->stream->adjust = *adjust;
dc->hwss.set_drr(&pipe,
1,
@@ -484,128 +492,6 @@ void dc_stream_set_static_screen_events(struct dc *dc,
dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}
-void dc_link_set_drive_settings(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link)
-{
-
- int i;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i] == link)
- break;
- }
-
- if (i >= dc->link_count)
- ASSERT_CRITICAL(false);
-
- dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
-}
-
-void dc_link_perform_link_training(struct dc *dc,
- struct dc_link_settings *link_setting,
- bool skip_video_pattern)
-{
- int i;
-
- for (i = 0; i < dc->link_count; i++)
- dc_link_dp_perform_link_training(
- dc->links[i],
- link_setting,
- skip_video_pattern);
-}
-
-void dc_link_set_preferred_link_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe;
- struct dc_stream_state *link_stream;
- struct dc_link_settings store_settings = *link_setting;
-
- link->preferred_link_setting = store_settings;
-
- /* Retrain with preferred link settings only relevant for
- * DP signal type
- */
- if (!dc_is_dp_signal(link->connector_signal))
- return;
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->link) {
- if (pipe->stream->link == link)
- break;
- }
- }
-
- /* Stream not found */
- if (i == MAX_PIPES)
- return;
-
- link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
-
- /* Cannot retrain link if backend is off */
- if (link_stream->dpms_off)
- return;
-
- if (link_stream)
- decide_link_settings(link_stream, &store_settings);
-
- if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
- (store_settings.link_rate != LINK_RATE_UNKNOWN))
- dp_retrain_link_dp_test(link, &store_settings, false);
-}
-
-void dc_link_enable_hpd(const struct dc_link *link)
-{
- dc_link_dp_enable_hpd(link);
-}
-
-void dc_link_disable_hpd(const struct dc_link *link)
-{
- dc_link_dp_disable_hpd(link);
-}
-
-
-void dc_link_set_test_pattern(struct dc_link *link,
- enum dp_test_pattern test_pattern,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size)
-{
- if (link != NULL)
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- p_link_settings,
- p_custom_pattern,
- cust_pattern_size);
-}
-
-uint32_t dc_link_bandwidth_kbps(
- const struct dc_link *link,
- const struct dc_link_settings *link_setting)
-{
- uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
-
- link_bw_kbps *= 8; /* 8 bits per byte*/
- link_bw_kbps *= link_setting->lane_count;
-
- return link_bw_kbps;
-
-}
-
-const struct dc_link_settings *dc_link_get_link_cap(
- const struct dc_link *link)
-{
- if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
- return &link->preferred_link_setting;
- return &link->verified_link_cap;
-}
-
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
@@ -613,6 +499,11 @@ static void destruct(struct dc *dc)
destroy_links(dc);
+ if (dc->clk_mgr) {
+ dc_destroy_clk_mgr(dc->clk_mgr);
+ dc->clk_mgr = NULL;
+ }
+
dc_destroy_resource_pool(dc);
if (dc->ctx->gpio_service)
@@ -756,6 +647,10 @@ static bool construct(struct dc *dc,
if (!dc->res_pool)
goto fail;
+ dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
+ if (!dc->clk_mgr)
+ goto fail;
+
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
@@ -1136,10 +1031,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* Program all planes within new context*/
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
- struct dc_stream_status *status;
-
- if (context->streams[i]->apply_seamless_boot_optimization)
- context->streams[i]->apply_seamless_boot_optimization = false;
if (!context->streams[i]->mode_changed)
continue;
@@ -1164,9 +1055,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
}
- status = dc_stream_get_status_from_state(context, context->streams[i]);
- context->streams[i]->out.otg_offset = status->primary_otg_inst;
-
CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
context->streams[i]->timing.h_addressable,
context->streams[i]->timing.v_addressable,
@@ -1331,71 +1219,94 @@ static bool is_surface_in_context(
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
+ enum surface_update_type update_type = UPDATE_TYPE_FAST;
if (!u->plane_info)
return UPDATE_TYPE_FAST;
- if (u->plane_info->color_space != u->surface->color_space)
+ if (u->plane_info->color_space != u->surface->color_space) {
update_flags->bits.color_space_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
- if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
+ if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
update_flags->bits.horizontal_mirror_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
- if (u->plane_info->rotation != u->surface->rotation)
+ if (u->plane_info->rotation != u->surface->rotation) {
update_flags->bits.rotation_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
- if (u->plane_info->format != u->surface->format)
+ if (u->plane_info->format != u->surface->format) {
update_flags->bits.pixel_format_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
- if (u->plane_info->stereo_format != u->surface->stereo_format)
+ if (u->plane_info->stereo_format != u->surface->stereo_format) {
update_flags->bits.stereo_format_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
- if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
+ if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
update_flags->bits.per_pixel_alpha_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
- if (u->plane_info->global_alpha_value != u->surface->global_alpha_value)
+ if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
update_flags->bits.global_alpha_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
+ update_flags->bits.sdr_white_level = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
if (u->plane_info->dcc.enable != u->surface->dcc.enable
|| u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
- || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
+ || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch) {
update_flags->bits.dcc_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
if (resource_pixel_format_to_bpp(u->plane_info->format) !=
- resource_pixel_format_to_bpp(u->surface->format))
+ resource_pixel_format_to_bpp(u->surface->format)) {
/* different bytes per element will require full bandwidth
* and DML calculation
*/
update_flags->bits.bpp_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
if (u->plane_info->plane_size.grph.surface_pitch != u->surface->plane_size.grph.surface_pitch
|| u->plane_info->plane_size.video.luma_pitch != u->surface->plane_size.video.luma_pitch
- || u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch)
+ || u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch) {
update_flags->bits.plane_size_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
sizeof(union dc_tiling_info)) != 0) {
update_flags->bits.swizzle_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+
/* todo: below are HW dependent, we should add a hook to
* DCE/N resource and validated there.
*/
- if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
+ if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
/* swizzled mode requires RQ to be setup properly,
* thus need to run DML to calculate RQ settings
*/
update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
}
- if (update_flags->bits.rotation_change
- || update_flags->bits.stereo_format_change
- || update_flags->bits.pixel_format_change
- || update_flags->bits.bpp_change
- || update_flags->bits.bandwidth_change
- || update_flags->bits.output_tf_change)
- return UPDATE_TYPE_FULL;
-
- return update_flags->raw ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST;
+ /* This should be UPDATE_TYPE_FAST if nothing has changed. */
+ return update_type;
}
static enum surface_update_type get_scaling_info_update_type(
@@ -1475,6 +1386,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
type = get_scaling_info_update_type(u);
elevate_update_type(&overall_type, type);
+ if (u->flip_addr)
+ update_flags->bits.addr_update = 1;
+
if (u->in_transfer_func)
update_flags->bits.in_transfer_func_change = 1;
@@ -1711,13 +1625,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
pipe_ctx->stream &&
pipe_ctx->stream == stream) {
- /* Fast update*/
- // VRR program can be done as part of FAST UPDATE
- if (stream_update->adjust)
- dc->hwss.set_drr(&pipe_ctx, 1,
- stream_update->adjust->v_total_min,
- stream_update->adjust->v_total_max);
-
if (stream_update->periodic_interrupt0 &&
dc->hwss.setup_periodic_interrupt)
dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
@@ -1792,10 +1699,15 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->optimize_seamless_boot && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
- * bandwidth.
+ * bandwidth. Note that UEFI is expected to light up only a single
+ * display on POST, so we only expect one stream with the seamless
+ * boot flag set.
*/
- dc->optimize_seamless_boot = false;
- dc->optimized_required = true;
+ if (stream->apply_seamless_boot_optimization) {
+ stream->apply_seamless_boot_optimization = false;
+ dc->optimize_seamless_boot = false;
+ dc->optimized_required = true;
+ }
}
if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
@@ -1870,6 +1782,20 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
+
+ // Fire manual trigger only when bottom plane is flipped
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->bottom_pipe ||
+ !pipe_ctx->stream ||
+ pipe_ctx->stream != stream ||
+ !pipe_ctx->plane_state->update_flags.bits.addr_update)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
+ pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
+ }
}
void dc_commit_updates_for_stream(struct dc *dc,
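
The rewritten get_plane_info_update_type() folds every detected change into a running maximum via elevate_update_type(). A minimal sketch of that monotonic max, assuming the FAST < MED < FULL ordering used by the enum:

	enum upd_sketch { UPD_FAST, UPD_MED, UPD_FULL }; /* assumed ordering */

	static void elevate_sketch(enum upd_sketch *overall, enum upd_sketch found)
	{
		if (found > *overall) /* severity can only ever be raised */
			*overall = found;
	}
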
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 83d121510ef5..c026b393f3c5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/delay.h>
+
#include "dm_services.h"
#include "core_types.h"
#include "timing_generator.h"
@@ -45,8 +47,10 @@ enum dc_color_space_type {
COLOR_SPACE_RGB_LIMITED_TYPE,
COLOR_SPACE_YCBCR601_TYPE,
COLOR_SPACE_YCBCR709_TYPE,
+ COLOR_SPACE_YCBCR2020_TYPE,
COLOR_SPACE_YCBCR601_LIMITED_TYPE,
- COLOR_SPACE_YCBCR709_LIMITED_TYPE
+ COLOR_SPACE_YCBCR709_LIMITED_TYPE,
+ COLOR_SPACE_YCBCR709_BLACK_TYPE,
};
static const struct tg_color black_color_format[] = {
@@ -80,7 +84,6 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {
{ COLOR_SPACE_YCBCR709_TYPE,
{ 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
-
/* TODO: correct values below */
{ COLOR_SPACE_YCBCR601_LIMITED_TYPE,
{ 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
@@ -88,6 +91,12 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {
{ COLOR_SPACE_YCBCR709_LIMITED_TYPE,
{ 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+ { COLOR_SPACE_YCBCR2020_TYPE,
+ { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
+ 0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
+ { COLOR_SPACE_YCBCR709_BLACK_TYPE,
+ { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
};
static bool is_rgb_type(
@@ -149,6 +158,16 @@ static bool is_ycbcr709_type(
return ret;
}
+static bool is_ycbcr2020_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_2020_YCBCR)
+ ret = true;
+ return ret;
+}
+
static bool is_ycbcr709_limited_type(
enum dc_color_space color_space)
{
@@ -174,7 +193,12 @@ enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
type = COLOR_SPACE_YCBCR601_LIMITED_TYPE;
else if (is_ycbcr709_limited_type(color_space))
type = COLOR_SPACE_YCBCR709_LIMITED_TYPE;
-
+ else if (is_ycbcr2020_type(color_space))
+ type = COLOR_SPACE_YCBCR2020_TYPE;
+ else if (color_space == COLOR_SPACE_YCBCR709)
+ type = COLOR_SPACE_YCBCR709_BLACK_TYPE;
+ else if (color_space == COLOR_SPACE_YCBCR709_BLACK)
+ type = COLOR_SPACE_YCBCR709_BLACK_TYPE;
return type;
}
@@ -206,6 +230,7 @@ void color_space_to_black_color(
switch (colorspace) {
case COLOR_SPACE_YCBCR601:
case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_BLACK:
case COLOR_SPACE_YCBCR601_LIMITED:
case COLOR_SPACE_YCBCR709_LIMITED:
case COLOR_SPACE_2020_YCBCR:
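The additions above follow a two-step pattern: classify a dc_color_space into a coarse matrix type, then look that type up in output_csc_matrix. A compressed sketch of the same pattern, with abbreviated stand-in names and only the two new rows (matrix values truncated):

#include <stddef.h>

enum cs { CS_2020_YCBCR, CS_YCBCR709_BLACK };
enum cs_type { CS_YCBCR2020_TYPE, CS_YCBCR709_BLACK_TYPE };

struct csc_row { enum cs_type type; unsigned short regval[12]; };

static const struct csc_row csc_table[] = {
	{ CS_YCBCR2020_TYPE,      { 0x1000, 0xF149, 0xFEB7, 0x0000 /* ... */ } },
	{ CS_YCBCR709_BLACK_TYPE, { 0x0000, 0x0000, 0x0000, 0x1000 /* ... */ } },
};

static enum cs_type classify(enum cs c)
{
	return (c == CS_2020_YCBCR) ? CS_YCBCR2020_TYPE
				    : CS_YCBCR709_BLACK_TYPE;
}

static const unsigned short *find_csc(enum cs c)
{
	size_t i;

	for (i = 0; i < sizeof(csc_table) / sizeof(csc_table[0]); i++)
		if (csc_table[i].type == classify(c))
			return csc_table[i].regval;
	return NULL; /* caller programs a default matrix instead */
}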
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index b37ecc3ede61..f48863cf796b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "atom.h"
#include "dm_helpers.h"
@@ -42,6 +44,7 @@
#include "fixed31_32.h"
#include "dpcd_defs.h"
#include "dmcu.h"
+#include "hw/clk_mgr.h"
#define DC_LOGGER_INIT(logger)
@@ -704,6 +707,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if (new_connection_type != dc_connection_none) {
link->type = new_connection_type;
+ link->link_state_valid = false;
/* From Disconnected-to-Connected. */
switch (link->connector_signal) {
@@ -906,10 +910,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
/* Connectivity log: detection */
- for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
+ for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
- &sink->dc_edid.raw_edid[i * EDID_BLOCK_SIZE],
- EDID_BLOCK_SIZE,
+ &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
+ DC_EDID_BLOCK_SIZE,
"%s: [Block %d] ", sink->edid_caps.display_name, i);
}
@@ -2337,7 +2341,8 @@ void core_link_resume(struct dc_link *link)
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
struct fixed31_32 mbytes_per_sec;
- uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings);
+ uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link,
+ &stream->link->cur_link_settings);
link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
@@ -2631,6 +2636,8 @@ void core_link_enable_stream(
stream->phy_pix_clk,
pipe_ctx->stream_res.audio != NULL);
+ pipe_ctx->stream->link->link_state_valid = true;
+
if (dc_is_dvi_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
@@ -2713,17 +2720,37 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
core_dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- dal_ddc_service_write_scdc_data(
- stream->link->ddc, 0,
- stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ struct ext_hdmi_settings settings = {0};
+ enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
+ unsigned short masked_chip_caps = link->chip_caps &
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+	// Need to inform the sink that it is going to use legacy HDMI mode.
+	dal_ddc_service_write_scdc_data(
+		link->ddc,
+		165000, // VBIOS only handles 165 MHz.
+ false);
+ if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ /* DP159, Retimer settings */
+ if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
+ write_i2c_retimer_setting(pipe_ctx,
+ false, false, &settings);
+ else
+ write_i2c_default_retimer_setting(pipe_ctx,
+ false, false);
+ } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ /* PI3EQX1204, Redriver settings */
+ write_i2c_redriver_setting(pipe_ctx, false);
+ }
+ }
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
@@ -2834,3 +2861,127 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
return kbps;
}
+
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link)
+{
+	int i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i] == link)
+ break;
+ }
+
+	if (i >= dc->link_count) {
+		ASSERT_CRITICAL(false);
+		return;
+	}
+
+	dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+void dc_link_perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ int i;
+
+ for (i = 0; i < dc->link_count; i++)
+ dc_link_dp_perform_link_training(
+ dc->links[i],
+ link_setting,
+ skip_video_pattern);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ int i;
+ struct pipe_ctx *pipe;
+ struct dc_stream_state *link_stream;
+ struct dc_link_settings store_settings = *link_setting;
+
+ link->preferred_link_setting = store_settings;
+
+ /* Retrain with preferred link settings only relevant for
+ * DP signal type
+ */
+ if (!dc_is_dp_signal(link->connector_signal))
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link) {
+ if (pipe->stream->link == link)
+ break;
+ }
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return;
+
+ link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+	if (!link_stream)
+		return;
+
+	/* Cannot retrain link if backend is off */
+	if (link_stream->dpms_off)
+		return;
+
+	decide_link_settings(link_stream, &store_settings);
+
+ if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
+ (store_settings.link_rate != LINK_RATE_UNKNOWN))
+ dp_retrain_link_dp_test(link, &store_settings, false);
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_enable_hpd(link);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_disable_hpd(link);
+}
+
+
+void dc_link_set_test_pattern(struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ if (link != NULL)
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ p_link_settings,
+ p_custom_pattern,
+ cust_pattern_size);
+}
+
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting)
+{
+	uint32_t link_bw_kbps =
+		link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* kbytes per sec per lane */
+
+	link_bw_kbps *= 8;	/* 8 bits per byte */
+	link_bw_kbps *= link_setting->lane_count;
+
+	return link_bw_kbps;
+}
+
+const struct dc_link_settings *dc_link_get_link_cap(
+ const struct dc_link *link)
+{
+ if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+ return &link->preferred_link_setting;
+ return &link->verified_link_cap;
+}
+
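dc_link_bandwidth_kbps() above is a straight product; here is a worked example, assuming LINK_RATE_REF_FREQ_IN_KHZ is the 27 MHz DP reference frequency (one byte per 8b/10b link symbol) and 0x14 is the DPCD encoding for HBR2:

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000 /* assumed: 27 MHz DP reference */

int main(void)
{
	uint32_t link_rate  = 0x14; /* HBR2: 5.4 Gbps per lane */
	uint32_t lane_count = 4;

	uint32_t kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* kbytes/s/lane */

	kbps *= 8;          /* 8 bits per byte */
	kbps *= lane_count; /* all lanes */

	printf("%u kbps\n", kbps);        /* 17280000 kbps */
	printf("%u MB/s\n", kbps / 8000); /* 2160 MB/s, as in get_pbn_per_slot() */
	return 0;
}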
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index f02092a0dc76..eecc631ca4f8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dm_helpers.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 1ee544a32ebb..65d6caedbd82 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2361,6 +2361,7 @@ static bool retrieve_link_cap(struct dc_link *link)
/*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
*/
uint8_t dpcd_dprx_data = '\0';
+ uint8_t dpcd_power_state = '\0';
struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
@@ -2377,6 +2378,17 @@ static bool retrieve_link_cap(struct dc_link *link)
memset(&edp_config_cap, '\0',
sizeof(union edp_configuration_cap));
+ status = core_link_read_dpcd(link, DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+	/* Delay 1 ms if the AUX CH is in the power-down state. Per spec
+	 * section 2.3.1.2, the AUX CH may be powered down by a write of 2
+	 * to DPCD 600h. The sink AUX CH then only monitors the differential
+	 * signal and may need up to 1 ms before being able to reply.
+	 */
+ if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
+ udelay(1000);
+
for (i = 0; i < read_dpcd_retry_cnt; i++) {
status = core_link_read_dpcd(
link,
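A self-contained sketch of the wake-up rule the hunk above adds, with stubs standing in for the real core_link_read_dpcd() and udelay() primitives:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_SET_POWER    0x600
#define DP_SET_POWER_D3 0x2

/* Stubs standing in for the real AUX read and delay primitives. */
static bool dpcd_read(uint32_t addr, uint8_t *val)
{
	(void)addr;
	*val = DP_SET_POWER_D3; /* pretend the sink reports D3 */
	return true;
}
static void delay_us(unsigned int us) { printf("sleep %u us\n", us); }

static void wake_aux_before_caps_read(void)
{
	uint8_t power_state = 0;

	/* A sink put in D3 (DPCD 600h = 2) only monitors the differential
	 * pair and may need up to 1 ms before it can answer AUX again, so
	 * wait when the read fails or reports D3.
	 */
	if (!dpcd_read(DP_SET_POWER, &power_state) ||
	    power_state == DP_SET_POWER_D3)
		delay_us(1000);
}

int main(void) { wake_aux_before_caps_read(); return 0; }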
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index eac7186e4f08..1b5756590a6a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -22,6 +22,9 @@
* Authors: AMD
*
*/
+
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "resource.h"
@@ -93,10 +96,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case FAMILY_RV:
dc_version = DCN_VERSION_1_0;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_1_01;
-#endif
break;
#endif
default:
@@ -147,9 +148,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
case DCN_VERSION_1_01:
-#endif
res_pool = dcn10_create_resource_pool(init_data, dc);
break;
#endif
@@ -1184,24 +1183,27 @@ static int acquire_first_split_pipe(
int i;
for (i = 0; i < pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-
- if (pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state) {
- pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
- if (pipe_ctx->bottom_pipe)
- pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
-
- memset(pipe_ctx, 0, sizeof(*pipe_ctx));
- pipe_ctx->stream_res.tg = pool->timing_generators[i];
- pipe_ctx->plane_res.hubp = pool->hubps[i];
- pipe_ctx->plane_res.ipp = pool->ipps[i];
- pipe_ctx->plane_res.dpp = pool->dpps[i];
- pipe_ctx->stream_res.opp = pool->opps[i];
- pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst;
- pipe_ctx->pipe_idx = i;
-
- pipe_ctx->stream = stream;
+ struct pipe_ctx *split_pipe = &res_ctx->pipe_ctx[i];
+
+ if (split_pipe->top_pipe && !dc_res_is_odm_head_pipe(split_pipe) &&
+ split_pipe->top_pipe->plane_state == split_pipe->plane_state) {
+ split_pipe->top_pipe->bottom_pipe = split_pipe->bottom_pipe;
+ if (split_pipe->bottom_pipe)
+ split_pipe->bottom_pipe->top_pipe = split_pipe->top_pipe;
+
+ if (split_pipe->top_pipe->plane_state)
+ resource_build_scaling_params(split_pipe->top_pipe);
+
+ memset(split_pipe, 0, sizeof(*split_pipe));
+ split_pipe->stream_res.tg = pool->timing_generators[i];
+ split_pipe->plane_res.hubp = pool->hubps[i];
+ split_pipe->plane_res.ipp = pool->ipps[i];
+ split_pipe->plane_res.dpp = pool->dpps[i];
+ split_pipe->stream_res.opp = pool->opps[i];
+ split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
+ split_pipe->pipe_idx = i;
+
+ split_pipe->stream = stream;
return i;
}
}
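acquire_first_split_pipe() above is, at heart, a doubly-linked-list unlink followed by a reset; a minimal sketch with a stand-in pipe struct rather than the real pipe_ctx:

#include <string.h>

/* Stand-in for the vertical split chain kept via top_pipe/bottom_pipe. */
struct split_pipe {
	struct split_pipe *top_pipe;
	struct split_pipe *bottom_pipe;
};

/* Detach 'p' so its neighbours point at each other, then recycle it. */
static void recycle_split_pipe(struct split_pipe *p)
{
	if (p->top_pipe)
		p->top_pipe->bottom_pipe = p->bottom_pipe;
	if (p->bottom_pipe)
		p->bottom_pipe->top_pipe = p->top_pipe;

	memset(p, 0, sizeof(*p)); /* ready for a fresh stream, as above */
}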
@@ -1647,46 +1649,6 @@ static int acquire_first_free_pipe(
return -1;
}
-static struct stream_encoder *find_first_free_match_stream_enc_for_link(
- struct resource_context *res_ctx,
- const struct resource_pool *pool,
- struct dc_stream_state *stream)
-{
- int i;
- int j = -1;
- struct dc_link *link = stream->link;
-
- for (i = 0; i < pool->stream_enc_count; i++) {
- if (!res_ctx->is_stream_enc_acquired[i] &&
- pool->stream_enc[i]) {
- /* Store first available for MST second display
- * in daisy chain use case */
- j = i;
- if (pool->stream_enc[i]->id ==
- link->link_enc->preferred_engine)
- return pool->stream_enc[i];
- }
- }
-
- /*
- * below can happen in cases when stream encoder is acquired:
- * 1) for second MST display in chain, so preferred engine already
- * acquired;
- * 2) for another link, which preferred engine already acquired by any
- * MST configuration.
- *
- * If signal is of DP type and preferred engine not found, return last available
- *
- * TODO - This is just a patch up and a generic solution is
- * required for non DP connectors.
- */
-
- if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
- return pool->stream_enc[j];
-
- return NULL;
-}
-
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1998,7 +1960,7 @@ enum dc_status resource_map_pool_resources(
pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
pipe_ctx->stream_res.stream_enc =
- find_first_free_match_stream_enc_for_link(
+ dc->res_pool->funcs->find_first_free_match_stream_enc_for_link(
&context->res_ctx, pool, stream);
if (!pipe_ctx->stream_res.stream_enc)
@@ -2059,7 +2021,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->clk_mgr = dc->res_pool->clk_mgr;
+ dst_ctx->clk_mgr = dc->clk_mgr;
}
/**
@@ -2354,7 +2316,18 @@ static void set_avi_info_frame(
break;
}
}
+	/* If VIC >= 128, the Source shall use AVI InfoFrame Version 3 */
hdmi_info.bits.VIC0_VIC7 = vic;
+ if (vic >= 128)
+ hdmi_info.bits.header.version = 3;
+	/* If (C1, C0)=(1, 1) and (EC2, EC1, EC0)=(1, 1, 1),
+	 * the Source shall use AVI InfoFrame Version 4
+	 */
+ if (hdmi_info.bits.C0_C1 == COLORIMETRY_EXTENDED &&
+ hdmi_info.bits.EC0_EC2 == COLORIMETRYEX_RESERVED) {
+ hdmi_info.bits.header.version = 4;
+ hdmi_info.bits.header.length = 14;
+ }
/* pixel repetition
* PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
@@ -2373,12 +2346,19 @@ static void set_avi_info_frame(
hdmi_info.bits.bar_right = (stream->timing.h_total
- stream->timing.h_border_right + 1);
+ /* Additional Colorimetry Extension
+	 * Used in conjunction with C0-C1 and EC0-EC2
+ * 0 = DCI-P3 RGB (D65)
+ * 1 = DCI-P3 RGB (theater)
+ */
+ hdmi_info.bits.ACE0_ACE3 = 0;
+
/* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
check_sum = &hdmi_info.packet_raw_data.sb[0];
- *check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;
+ *check_sum = HDMI_INFOFRAME_TYPE_AVI + hdmi_info.bits.header.length + hdmi_info.bits.header.version;
- for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
+ for (byte_index = 1; byte_index <= hdmi_info.bits.header.length; byte_index++)
*check_sum += hdmi_info.packet_raw_data.sb[byte_index];
/* one byte complement */
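The new checksum computation above sums the three header bytes plus the variable-length payload; a sketch of the arithmetic, assuming sb[0] is the checksum slot as in packet_raw_data:

#include <stdint.h>

/* Checksum chosen so that (type + version + length + all payload bytes +
 * checksum) wraps to zero modulo 256, as the hunk above computes.
 */
static uint8_t avi_checksum(uint8_t type, uint8_t version, uint8_t length,
			    const uint8_t *sb)
{
	uint8_t sum = type + version + length;
	int i;

	for (i = 1; i <= length; i++) /* sb[0] is reserved for the checksum */
		sum += sb[i];

	return (uint8_t)(~sum + 1);   /* one-byte (two's) complement */
}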
@@ -2425,21 +2405,6 @@ static void set_spd_info_packet(
*info_packet = stream->vrr_infopacket;
}
-static void set_dp_sdp_info_packet(
- struct dc_info_packet *info_packet,
- struct dc_stream_state *stream)
-{
- /* SPD info packet for custom sdp message */
-
- /* Return if false. If true,
- * set the corresponding bit in the info packet
- */
- if (!stream->dpsdp_infopacket.valid)
- return;
-
- *info_packet = stream->dpsdp_infopacket;
-}
-
static void set_hdr_static_info_packet(
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
@@ -2495,7 +2460,6 @@ void dc_resource_state_copy_construct(
if (cur_pipe->bottom_pipe)
cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
}
for (i = 0; i < dst_ctx->stream_count; i++) {
@@ -2536,7 +2500,6 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->spd.valid = false;
info->hdrsmd.valid = false;
info->vsc.valid = false;
- info->dpsdp.valid = false;
signal = pipe_ctx->stream->signal;
@@ -2556,8 +2519,6 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_spd_info_packet(&info->spd, pipe_ctx->stream);
set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
-
- set_dp_sdp_info_packet(&info->dpsdp, pipe_ctx->stream);
}
patch_gamut_packet_checksum(&info->gamut);
@@ -2644,6 +2605,10 @@ bool pipe_need_reprogram(
if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
+	if (!pipe_ctx_old->stream->link->link_state_valid &&
+	    !pipe_ctx_old->stream->dpms_off)
+		return true;
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index 9971b515c3eb..5cbfdf1c4b11 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 96e97d25d639..7fe0dbe30666 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/delay.h>
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
@@ -47,8 +50,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
if (dc_is_dvi_signal(stream->signal)) {
if (stream->ctx->dc->caps.dual_link_dvi &&
- (stream->timing.pix_clk_100hz / 10) > TMDS_MAX_PIXEL_CLOCK &&
- sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+ (stream->timing.pix_clk_100hz / 10) > TMDS_MAX_PIXEL_CLOCK &&
+ sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
else
stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
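The condition above mixes two units: pix_clk_100hz counts 100 Hz steps, so dividing by 10 yields kHz for comparison against TMDS_MAX_PIXEL_CLOCK (presumably 165,000 kHz, the single-link TMDS limit). A worked check under those assumptions:

#include <stdbool.h>
#include <stdint.h>

#define TMDS_MAX_PIXEL_CLOCK 165000 /* kHz, single-link limit (assumed) */

static bool needs_dual_link(uint32_t pix_clk_100hz, bool dual_link_ok,
			    bool sink_single_link_only)
{
	/* /10 converts 100 Hz units to kHz */
	return dual_link_ok && !sink_single_link_only &&
	       (pix_clk_100hz / 10) > TMDS_MAX_PIXEL_CLOCK;
}

/* e.g. 2560x1600@60 needs ~268 MHz: 2680000 / 10 = 268000 kHz > 165000,
 * so SIGNAL_TYPE_DVI_DUAL_LINK is chosen when caps and sink allow it.
 */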
@@ -179,6 +182,9 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
if (new_stream->out_transfer_func)
dc_transfer_func_retain(new_stream->out_transfer_func);
+ new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
+ new_stream->ctx->dc_stream_id_count++;
+
kref_init(&new_stream->refcount);
return new_stream;
@@ -229,7 +235,7 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
unsigned int us_per_line;
if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
- ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
+ ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
@@ -371,42 +377,12 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
return 0;
}
-static void build_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx,
- const uint8_t *custom_sdp_message,
- unsigned int sdp_message_size)
-{
- uint8_t i;
- struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
-
- /* set valid info */
- info->dpsdp.valid = true;
-
- /* set sdp message header */
- info->dpsdp.hb0 = custom_sdp_message[0]; /* package id */
- info->dpsdp.hb1 = custom_sdp_message[1]; /* package type */
- info->dpsdp.hb2 = custom_sdp_message[2]; /* package specific byte 0 any data */
- info->dpsdp.hb3 = custom_sdp_message[3]; /* package specific byte 0 any data */
-
- /* set sdp message data */
- for (i = 0; i < 32; i++)
- info->dpsdp.sb[i] = (custom_sdp_message[i+4]);
-
-}
-
-static void invalid_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx)
-{
- struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
-
- /* in-valid info */
- info->dpsdp.valid = false;
-}
-
bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
const uint8_t *custom_sdp_message,
unsigned int sdp_message_size)
{
int i;
- struct dc *core_dc;
+ struct dc *dc;
struct resource_context *res_ctx;
if (stream == NULL) {
@@ -414,8 +390,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
return false;
}
- core_dc = stream->ctx->dc;
- res_ctx = &core_dc->current_state->res_ctx;
+ dc = stream->ctx->dc;
+ res_ctx = &dc->current_state->res_ctx;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -423,11 +399,14 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
if (pipe_ctx->stream != stream)
continue;
- build_dp_sdp_info_frame(pipe_ctx, custom_sdp_message, sdp_message_size);
-
- core_dc->hwss.update_info_frame(pipe_ctx);
+ if (dc->hwss.send_immediate_sdp_message != NULL)
+ dc->hwss.send_immediate_sdp_message(pipe_ctx,
+ custom_sdp_message,
+ sdp_message_size);
+ else
+ DC_LOG_WARNING("%s:send_immediate_sdp_message not implemented on this ASIC\n",
+ __func__);
- invalid_dp_sdp_info_frame(pipe_ctx);
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index a5e86f9b148f..87b3b03c3556 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/mm.h>
+
/* DC interface (public) */
#include "dm_services.h"
#include "dc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 44e4b0465587..7ec6884acee4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.27"
+#define DC_VER "3.2.32"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -205,6 +205,7 @@ struct dc_config {
bool disable_fractional_pwm;
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
+ bool edp_not_connected;
};
enum visual_confirm {
@@ -366,6 +367,7 @@ struct dc_bounding_box_overrides {
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
+ int min_dcfclk_mhz;
};
struct dc_state;
@@ -386,6 +388,8 @@ struct dc {
struct dc_state *current_state;
struct resource_pool *res_pool;
+ struct clk_mgr *clk_mgr;
+
/* Display Engine Clock levels */
struct dm_pp_clock_levels sclk_lvls;
@@ -540,12 +544,14 @@ struct dc_plane_status {
union surface_update_flags {
struct {
+ uint32_t addr_update:1;
/* Medium updates */
uint32_t dcc_change:1;
uint32_t color_space_change:1;
uint32_t horizontal_mirror_change:1;
uint32_t per_pixel_alpha_change:1;
uint32_t global_alpha_change:1;
+ uint32_t sdr_white_level:1;
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 5e6c5eff49cf..30b2f9edd42f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -26,6 +26,9 @@
* Created on: Aug 30, 2016
* Author: agrodzov
*/
+
+#include <linux/delay.h>
+
#include "dm_services.h"
#include <stdarg.h>
@@ -297,7 +300,7 @@ void generic_reg_wait(const struct dc_context *ctx,
int i;
-	/* something is terribly wrong if time out is > 200ms. (5Hz) */
-	ASSERT(delay_between_poll_us * time_out_num_tries <= 200000);
+	/* something is terribly wrong if time out is > 3000ms. (0.33Hz) */
+	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);
for (i = 0; i <= time_out_num_tries; i++) {
if (i) {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index da55d623647a..c91b8aad78c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -534,6 +534,7 @@ enum dc_color_space {
COLOR_SPACE_DOLBYVISION,
COLOR_SPACE_APPCTRL,
COLOR_SPACE_CUSTOMPOINTS,
+ COLOR_SPACE_YCBCR709_BLACK,
};
enum dc_dither_option {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 7b9429e30d82..094009127e25 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -75,6 +75,7 @@ struct dc_link {
enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
bool is_hpd_filter_disabled;
bool dp_ss_off;
+ bool link_state_valid;
/* caps is the same as reported_link_cap. link_traing use
* reported_link_cap. Will clean up. TODO
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 189bdab929a5..4da138ded8b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -172,7 +172,6 @@ struct dc_stream_update {
struct periodic_interrupt_config *periodic_interrupt0;
struct periodic_interrupt_config *periodic_interrupt1;
- struct dc_crtc_timing_adjust *adjust;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
struct dc_info_packet *vsp_infopacket;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 6c2a3d9a4c2e..92a670894c05 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -104,7 +104,7 @@ struct dc_context {
#define DC_MAX_EDID_BUFFER_SIZE 1024
-#define EDID_BLOCK_SIZE 128
+#define DC_EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 6d7b64a743ca..fdf3d8f87eee 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
+dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index da96229db53a..f8903bcabe49 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dce_abm.h"
#include "dm_services.h"
#include "reg_helper.h"
@@ -58,6 +60,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
uint32_t rampingBoundary = 0xFFFF;
+ if (abm->dmcu_is_running == false)
+ return true;
+
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
1, 80000);
@@ -302,6 +307,9 @@ static bool dce_abm_set_level(struct abm *abm, uint32_t level)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ if (abm->dmcu_is_running == false)
+ return true;
+
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
1, 80000);
@@ -320,6 +328,9 @@ static bool dce_abm_immediate_disable(struct abm *abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ if (abm->dmcu_is_running == false)
+ return true;
+
dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
abm->stored_backlight_registers.BL_PWM_CNTL =
@@ -443,6 +454,7 @@ static void dce_abm_construct(
base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
+ base->dmcu_is_running = false;
abm_dce->regs = regs;
abm_dce->abm_shift = abm_shift;
@@ -473,6 +485,9 @@ void dce_abm_destroy(struct abm **abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
+ if (abm_dce->base.dmcu_is_running == true)
+ abm_dce->base.funcs->set_abm_immediate_disable(*abm);
+
kfree(abm_dce);
*abm = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 7f6d724686f1..9b078a71de2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "reg_helper.h"
#include "dce_audio.h"
#include "dce/dce_11_0_d.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index bd33c47183fc..f2295e780031 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/delay.h>
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "core_types.h"
#include "dce_aux.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 963686380738..29d69dfc9848 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dce_clk_mgr.h"
#include "reg_helper.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index f70437aae8e0..8347be76c60a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
@@ -33,6 +35,7 @@
#include "include/logger_interface.h"
#include "dce_clock_source.h"
+#include "clk_mgr.h"
#include "reg_helper.h"
@@ -183,8 +186,8 @@ static bool calculate_fb_and_fractional_fb_divider(
*RETURNS:
* It fills the PLLSettings structure with PLL Dividers values
* if calculated values are within required tolerance
-* It returns - true if eror is within tolerance
-* - false if eror is not within tolerance
+* It returns - true if error is within tolerance
+* - false if error is not within tolerance
*/
static bool calc_fb_divider_checking_tolerance(
struct calc_pll_clock_source *calc_pll_cs,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 818536eea00a..ddd30fc0d76b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/delay.h>
+#include <linux/slab.h>
+
#include "core_types.h"
#include "link_encoder.h"
#include "dce_dmcu.h"
@@ -388,6 +391,9 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
/* Set initialized ramping boundary value */
REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF);
+ /* Set backlight ramping stepsize */
+ REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize);
+
/* Set command to initialize microcontroller */
REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
MCP_INIT_DMCU);
@@ -813,6 +819,9 @@ void dce_dmcu_destroy(struct dmcu **dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
+ if (dmcu_dce->base.dmcu_state == DMCU_RUNNING)
+ dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true);
+
kfree(dmcu_dce);
*dmcu = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 60ce56f60ae3..5bd0df55aa5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -263,4 +263,6 @@ struct dmcu *dcn10_dmcu_create(
void dce_dmcu_destroy(struct dmcu **dmcu);
+static const uint32_t abm_gain_stepsize = 0x0060;
+
#endif /* _DCE_ABM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index cd26161bcc4d..5ca558766d2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -22,6 +22,9 @@
* Authors: AMD
*
*/
+
+#include <linux/delay.h>
+
#include "dce_i2c.h"
#include "dce_i2c_hw.h"
#include "reg_helper.h"
@@ -268,6 +271,8 @@ static bool setup_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
+	/* We have checked that I2C is not in use by the DMCU; set
+	 * DC_I2C_SW_USE_I2C_REG_REQ to 1 to indicate SW is using it.
+	 */
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
if (dce_i2c_hw->setup_limit != 0)
i2c_setup_limit = dce_i2c_hw->setup_limit;
@@ -322,8 +327,6 @@ static void release_engine(
set_speed(dce_i2c_hw, dce_i2c_hw->original_speed);
- /* Release I2C */
- REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1);
/* Reset HW engine */
{
@@ -343,6 +346,9 @@ static void release_engine(
/* HW I2c engine - clock gating feature */
if (!dce_i2c_hw->engine_keep_power_up_count)
REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0);
+	/* Release I2C after reset, so HW or DMCU can use it */
+ REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1,
+ DC_I2C_SW_USE_I2C_REG_REQ, 0);
}
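The arbitration change above pairs a claim at setup with a done-plus-release at teardown; a sketch of that pairing, with stubbed register writes standing in for REG_UPDATE on DC_I2C_ARBITRATION:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for REG_UPDATE on the arbitration register. */
static void set_sw_use_req(bool on) { printf("SW_USE_I2C_REG_REQ=%d\n", on); }
static void set_sw_done(void)       { printf("SW_DONE_USING_I2C_REG=1\n"); }

static void i2c_sw_transaction(void (*xfer)(void))
{
	set_sw_use_req(true); /* claim the engine for SW, as in setup_engine() */
	xfer();
	/* release_engine(): mark done and drop the claim together so HW or
	 * the DMCU firmware can take the engine afterwards.
	 */
	set_sw_done();
	set_sw_use_req(false);
}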
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index 575500755b2e..f718e3d396f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -105,6 +105,7 @@ enum {
I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL, mask_sh),\
I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY, mask_sh),\
I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, mask_sh),\
I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, mask_sh),\
I2C_SF(DC_I2C_ARBITRATION, DC_I2C_NO_QUEUED_SW_GO, mask_sh),\
I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_PRIORITY, mask_sh),\
@@ -146,6 +147,7 @@ struct dce_i2c_shift {
uint8_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
uint8_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
uint8_t DC_I2C_SW_DONE_USING_I2C_REG;
+ uint8_t DC_I2C_SW_USE_I2C_REG_REQ;
uint8_t DC_I2C_NO_QUEUED_SW_GO;
uint8_t DC_I2C_SW_PRIORITY;
uint8_t DC_I2C_SOFT_RESET;
@@ -184,6 +186,7 @@ struct dce_i2c_mask {
uint32_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
uint32_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
uint32_t DC_I2C_SW_DONE_USING_I2C_REG;
+ uint32_t DC_I2C_SW_USE_I2C_REG_REQ;
uint32_t DC_I2C_NO_QUEUED_SW_GO;
uint32_t DC_I2C_SW_PRIORITY;
uint32_t DC_I2C_SOFT_RESET;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index f0266694cb56..a5a11c251e25 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -22,6 +22,9 @@
* Authors: AMD
*
*/
+
+#include <linux/delay.h>
+
#include "dce_i2c.h"
#include "dce_i2c_sw.h"
#include "include/gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
index 5d9506b3d46b..ce30dbf579d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dce_ipp.h"
#include "reg_helper.h"
#include "dm_services.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 314c04a915d2..8527cce81c6f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/delay.h>
+#include <linux/slab.h>
+
#include "reg_helper.h"
#include "core_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index 87093894ea9e..51081d9ae3fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "basics/conversion.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 14309fe6f2e6..5e2b4d47c548 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/delay.h>
+
#include "dc_bios_types.h"
#include "dce_stream_encoder.h"
#include "reg_helper.h"
@@ -418,6 +420,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
case COLOR_SPACE_YCBCR709:
case COLOR_SPACE_YCBCR709_LIMITED:
+ case COLOR_SPACE_YCBCR709_BLACK:
misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
misc1 = misc1 & ~0x80; /* bit7 = 0*/
dynamic_range_ycbcr = 1; /*bt709*/
@@ -1123,19 +1126,6 @@ union audio_cea_channels {
} channels;
};
-struct audio_clock_info {
- /* pixel clock frequency*/
- uint32_t pixel_clock_in_10khz;
- /* N - 32KHz audio */
- uint32_t n_32khz;
- /* CTS - 32KHz audio*/
- uint32_t cts_32khz;
- uint32_t n_44khz;
- uint32_t cts_44khz;
- uint32_t n_48khz;
- uint32_t cts_48khz;
-};
-
/* 25.2MHz/1.001*/
/* 25.2MHz*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 87771676acac..799d36299c9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -25,6 +25,7 @@
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
+#include "clk_mgr.h"
#include "hw_sequencer.h"
#include "dce100_hw_sequencer.h"
#include "resource.h"
@@ -111,8 +112,8 @@ void dce100_prepare_bandwidth(
{
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
- dc->res_pool->clk_mgr->funcs->update_clocks(
- dc->res_pool->clk_mgr,
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
context,
false);
}
@@ -123,8 +124,8 @@ void dce100_optimize_bandwidth(
{
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
- dc->res_pool->clk_mgr->funcs->update_clocks(
- dc->res_pool->clk_mgr,
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
context,
true);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index e938bf9986d3..6248c8455314 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -22,6 +22,9 @@
* Authors: AMD
*
*/
+
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "link_encoder.h"
@@ -35,8 +38,6 @@
#include "irq/dce110/irq_service_dce110.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
-
-#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_ipp.h"
#include "dce/dce_transform.h"
@@ -137,19 +138,6 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
-
-static const struct clk_mgr_registers disp_clk_regs = {
- CLK_COMMON_REG_LIST_DCE_BASE()
-};
-
-static const struct clk_mgr_shift disp_clk_shift = {
- CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
-};
-
-static const struct clk_mgr_mask disp_clk_mask = {
- CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
-};
-
#define ipp_regs(id)\
[id] = {\
IPP_DCE100_REG_LIST_DCE_BASE(id)\
@@ -746,9 +734,6 @@ static void destruct(struct dce110_resource_pool *pool)
dce_aud_destroy(&pool->base.audios[i]);
}
- if (pool->base.clk_mgr != NULL)
- dce_clk_mgr_destroy(&pool->base.clk_mgr);
-
if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);
@@ -867,13 +852,55 @@ enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, s
return DC_FAIL_SURFACE_VALIDATE;
}
+struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+ int j = -1;
+ struct dc_link *link = stream->link;
+
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (!res_ctx->is_stream_enc_acquired[i] &&
+ pool->stream_enc[i]) {
+ /* Store first available for MST second display
+ * in daisy chain use case
+ */
+ j = i;
+ if (pool->stream_enc[i]->id ==
+ link->link_enc->preferred_engine)
+ return pool->stream_enc[i];
+ }
+ }
+
+	/*
+	 * The below can happen when the stream encoder is already acquired:
+	 * 1) for the second MST display in a chain, so the preferred engine
+	 * is already acquired;
+	 * 2) for another link, whose preferred engine is already acquired by
+	 * some MST configuration.
+	 *
+	 * If the signal is of DP type and the preferred engine was not found,
+	 * return the last available one.
+	 *
+	 * TODO - This is just a patch up and a generic solution is
+	 * required for non-DP connectors.
+	 */
+
+ if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
+ return pool->stream_enc[j];
+
+ return NULL;
+}
+
static const struct resource_funcs dce100_res_pool_funcs = {
.destroy = dce100_destroy_resource_pool,
.link_enc_create = dce100_link_encoder_create,
.validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
- .validate_global = dce100_validate_global
+ .validate_global = dce100_validate_global,
+ .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
static bool construct(
@@ -932,16 +959,6 @@ static bool construct(
}
}
- pool->base.clk_mgr = dce_clk_mgr_create(ctx,
- &disp_clk_regs,
- &disp_clk_shift,
- &disp_clk_mask);
- if (pool->base.clk_mgr == NULL) {
- dm_error("DC: failed to create display clock!\n");
- BREAK_TO_DEBUGGER();
- goto res_create_fail;
- }
-
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
index 2f366d66635d..fecab7c560f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
@@ -46,4 +46,9 @@ enum dc_status dce100_add_stream_to_ctx(
struct dc_state *new_ctx,
struct dc_stream_state *dc_stream);
+struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream);
+
#endif /* DCE100_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 7b23239d33fe..72b580a4eb85 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/delay.h>
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dce/dce_11_0_d.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 7ac50ab1b762..753c96f74af0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -22,6 +22,9 @@
* Authors: AMD
*
*/
+
+#include <linux/delay.h>
+
#include "dm_services.h"
#include "dc.h"
#include "dc_bios_types.h"
@@ -46,6 +49,7 @@
#include "link_encoder.h"
#include "link_hwss.h"
#include "clock_source.h"
+#include "clk_mgr.h"
#include "abm.h"
#include "audio.h"
#include "reg_helper.h"
@@ -242,6 +246,9 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
switch (plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ prescale_params->scale = 0x2082;
+ break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
prescale_params->scale = 0x2020;
@@ -957,6 +964,9 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
struct pp_smu_funcs *pp_smu = NULL;
unsigned int i, num_audio = 1;
+ if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
+ return;
+
if (core_dc->res_pool->pp_smu)
pp_smu = core_dc->res_pool->pp_smu;
@@ -976,6 +986,8 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
/* TODO: audio should be per stream rather than per link */
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, false);
+ if (pipe_ctx->stream_res.audio)
+ pipe_ctx->stream_res.audio->enabled = true;
}
}
@@ -984,6 +996,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct pp_smu_funcs *pp_smu = NULL;
+ if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
+ return;
+
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
@@ -1017,6 +1032,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
/* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
* stream->stream_engine_id);
*/
+ if (pipe_ctx->stream_res.audio)
+ pipe_ctx->stream_res.audio->enabled = false;
}
}
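The new enabled flag above makes the audio enable/disable pair idempotent; a sketch of the guard, with a stand-in struct rather than the real stream_res.audio:

#include <stdbool.h>
#include <stddef.h>

struct azalia { bool enabled; };

/* Repeated enables/disables are no-ops, so unbalanced calls cannot
 * double-program the endpoint or skew the pp_smu active-display count.
 */
static void audio_enable(struct azalia *a)
{
	if (a && a->enabled)
		return;
	/* ... program azalia, unmute ... */
	if (a)
		a->enabled = true;
}

static void audio_disable(struct azalia *a)
{
	if (a && !a->enabled)
		return;
	/* ... mute, power down azalia ... */
	if (a)
		a->enabled = false;
}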
@@ -1296,6 +1313,11 @@ static enum dc_status dce110_enable_stream_timing(
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
+ 0,
+ 0,
+ 0,
+ 0,
+ pipe_ctx->stream->signal,
true);
}
@@ -1488,10 +1510,11 @@ static void disable_vga_and_power_gate_all_controllers(
}
}
-static struct dc_link *get_link_for_edp(struct dc *dc)
+static struct dc_link *get_edp_link(struct dc *dc)
{
int i;
+	// report any eDP link, even an unconnected DDI
for (i = 0; i < dc->link_count; i++) {
if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
return dc->links[i];
@@ -1499,23 +1522,13 @@ static struct dc_link *get_link_for_edp(struct dc *dc)
return NULL;
}
-static struct dc_link *get_link_for_edp_to_turn_off(
+static struct dc_link *get_edp_link_with_sink(
struct dc *dc,
struct dc_state *context)
{
int i;
struct dc_link *link = NULL;
- /* check if eDP panel is suppose to be set mode, if yes, no need to disable */
- for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
- if (context->streams[i]->dpms_off == true)
- return context->streams[i]->sink->link;
- else
- return NULL;
- }
- }
-
/* check if there is an eDP panel not in use */
for (i = 0; i < dc->link_count; i++) {
if (dc->links[i]->local_sink &&
@@ -1538,59 +1551,53 @@ static struct dc_link *get_link_for_edp_to_turn_off(
void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
{
int i;
- struct dc_link *edp_link_to_turnoff = NULL;
- struct dc_link *edp_link = get_link_for_edp(dc);
- bool can_edp_fast_boot_optimize = false;
- bool apply_edp_fast_boot_optimization = false;
+ struct dc_link *edp_link_with_sink = get_edp_link_with_sink(dc, context);
+ struct dc_link *edp_link = get_edp_link(dc);
+ bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
- for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->apply_seamless_boot_optimization) {
- can_apply_seamless_boot = true;
- break;
- }
- }
-
if (dc->hwss.init_pipes)
dc->hwss.init_pipes(dc, context);
- if (edp_link) {
- /* this seems to cause blank screens on DCE8 */
- if ((dc->ctx->dce_version == DCE_VERSION_8_0) ||
- (dc->ctx->dce_version == DCE_VERSION_8_1) ||
- (dc->ctx->dce_version == DCE_VERSION_8_3))
- can_edp_fast_boot_optimize = false;
- else
- can_edp_fast_boot_optimize =
- edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc);
+ // Check fastboot support, disable on DCE8 because of blank screens
+ if (edp_link && dc->ctx->dce_version != DCE_VERSION_8_0 &&
+ dc->ctx->dce_version != DCE_VERSION_8_1 &&
+ dc->ctx->dce_version != DCE_VERSION_8_3) {
+
+ // enable fastboot if backend is enabled on eDP
+ if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) {
+ /* Find eDP stream and set optimization flag */
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
+ context->streams[i]->apply_edp_fast_boot_optimization = true;
+ can_apply_edp_fast_boot = true;
+ break;
+ }
+ }
+ }
}
- if (can_edp_fast_boot_optimize)
- edp_link_to_turnoff = get_link_for_edp_to_turn_off(dc, context);
-
- /* if OS doesn't light up eDP and eDP link is available, we want to disable
- * If resume from S4/S5, should optimization.
- */
- if (can_edp_fast_boot_optimize && !edp_link_to_turnoff) {
- /* Find eDP stream and set optimization flag */
- for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
- context->streams[i]->apply_edp_fast_boot_optimization = true;
- apply_edp_fast_boot_optimization = true;
- }
+ // Check seamless boot support
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->apply_seamless_boot_optimization) {
+ can_apply_seamless_boot = true;
+ break;
}
}
- if (!apply_edp_fast_boot_optimization && !can_apply_seamless_boot) {
- if (edp_link_to_turnoff) {
+	/* eDP should not have a stream on resume from S4, so even with
+	 * VBIOS post it should get turned off.
+	 */
+ if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
+ if (edp_link_with_sink) {
/*turn off backlight before DP_blank and encoder powered down*/
- dc->hwss.edp_backlight_control(edp_link_to_turnoff, false);
+ dc->hwss.edp_backlight_control(edp_link_with_sink, false);
}
/*resume from S3, no vbios posting, no need to power down again*/
power_down_all_hw_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
- if (edp_link_to_turnoff)
- dc->hwss.edp_power_control(edp_link_to_turnoff, false);
+ if (edp_link_with_sink)
+ dc->hwss.edp_power_control(edp_link_with_sink, false);
}
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios);
}
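A condensed view of the reworked boot decision above, under stand-in names: whatever the VBIOS lit up is kept only when eDP fast boot (non-DCE8, DIG already enabled) or seamless boot applies; otherwise backlight, HW blocks and eDP power all go down before the first mode set.

#include <stdbool.h>

/* Stand-in predicate mirroring dce110_enable_accelerated_mode() above. */
static bool keep_vbios_lit_state(bool has_edp, bool is_dce8,
				 bool edp_dig_enabled, bool seamless_boot)
{
	/* Fast boot: the eDP backend is already driven by VBIOS, except on
	 * DCE8 where it is known to cause blank screens.
	 */
	bool edp_fast_boot = has_edp && !is_dce8 && edp_dig_enabled;

	return edp_fast_boot || seamless_boot;
}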
@@ -2030,8 +2037,10 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->stream == NULL)
continue;
- if (pipe_ctx->stream == pipe_ctx_old->stream)
+ if (pipe_ctx->stream == pipe_ctx_old->stream &&
+ pipe_ctx->stream->link->link_state_valid) {
continue;
+ }
if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
continue;
@@ -2318,6 +2327,7 @@ static void init_hw(struct dc *dc)
struct dc_bios *bp;
struct transform *xfm;
struct abm *abm;
+ struct dmcu *dmcu;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2345,9 +2355,6 @@ static void init_hw(struct dc *dc)
* default signal on connector). */
struct dc_link *link = dc->links[i];
- if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
- dc->hwss.edp_power_control(link, true);
-
link->link_enc->funcs->hw_init(link->link_enc);
}
@@ -2373,6 +2380,10 @@ static void init_hw(struct dc *dc)
abm->funcs->abm_init(abm);
}
+ dmcu = dc->res_pool->dmcu;
+ if (dmcu != NULL && abm != NULL)
+ abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+
if (dc->fbc_compressor)
dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
@@ -2383,7 +2394,7 @@ void dce110_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
{
- struct clk_mgr *dccg = dc->res_pool->clk_mgr;
+ struct clk_mgr *dccg = dc->clk_mgr;
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
@@ -2397,7 +2408,7 @@ void dce110_optimize_bandwidth(
struct dc *dc,
struct dc_state *context)
{
- struct clk_mgr *dccg = dc->res_pool->clk_mgr;
+ struct clk_mgr *dccg = dc->clk_mgr;
dce110_set_displaymarks(dc, context);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
index 9b65b77e8823..34c5e3c7c6d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/delay.h>
+
#include "dm_services.h"
/* include DCE11 register header files */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index dcd04e9ea76b..764329264c3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "link_encoder.h"
@@ -30,8 +32,6 @@
#include "resource.h"
#include "dce110/dce110_resource.h"
-
-#include "dce/dce_clk_mgr.h"
#include "include/irq_service_interface.h"
#include "dce/dce_audio.h"
#include "dce110/dce110_timing_generator.h"
@@ -149,18 +149,6 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct clk_mgr_registers disp_clk_regs = {
- CLK_COMMON_REG_LIST_DCE_BASE()
-};
-
-static const struct clk_mgr_shift disp_clk_shift = {
- CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
-};
-
-static const struct clk_mgr_mask disp_clk_mask = {
- CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
-};
-
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE110_COMMON_REG_LIST()
};
@@ -811,9 +799,6 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.clk_mgr != NULL)
- dce_clk_mgr_destroy(&pool->base.clk_mgr);
-
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
@@ -1097,6 +1082,11 @@ static struct pipe_ctx *dce110_acquire_underlay(
pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg,
&stream->timing,
+ 0,
+ 0,
+ 0,
+ 0,
+ pipe_ctx->stream->signal,
false);
pipe_ctx->stream_res.tg->funcs->enable_advanced_request(
@@ -1129,6 +1119,38 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
+struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+ int j = -1;
+ struct dc_link *link = stream->link;
+
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (!res_ctx->is_stream_enc_acquired[i] &&
+ pool->stream_enc[i]) {
+ /* Store first available for MST second display
+ * in daisy chain use case
+ */
+ j = i;
+ if (pool->stream_enc[i]->id ==
+ link->link_enc->preferred_engine)
+ return pool->stream_enc[i];
+ }
+ }
+
+ /*
+ * For CZ and later, we can allow DIG FE and BE to differ for all display types
+ */
+
+ if (j >= 0)
+ return pool->stream_enc[j];
+
+ return NULL;
+}
+
static const struct resource_funcs dce110_res_pool_funcs = {
.destroy = dce110_destroy_resource_pool,
@@ -1137,7 +1159,8 @@ static const struct resource_funcs dce110_res_pool_funcs = {
.validate_plane = dce110_validate_plane,
.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
.add_stream_to_ctx = dce110_add_stream_to_ctx,
- .validate_global = dce110_validate_global
+ .validate_global = dce110_validate_global,
+ .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link
};
static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
@@ -1308,16 +1331,6 @@ static bool construct(
}
}
- pool->base.clk_mgr = dce110_clk_mgr_create(ctx,
- &disp_clk_regs,
- &disp_clk_shift,
- &disp_clk_mask);
- if (pool->base.clk_mgr == NULL) {
- dm_error("DC: failed to create display clock!\n");
- BREAK_TO_DEBUGGER();
- goto res_create_fail;
- }
-
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
index e5f168c1f8c8..aa4531e0800e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
@@ -45,5 +45,10 @@ struct resource_pool *dce110_create_resource_pool(
struct dc *dc,
struct hw_asic_id asic_id);
+struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream);
+
#endif /* __DC_RESOURCE_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 1b2fe0df347f..5f7c2c5641c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -1952,6 +1952,11 @@ void dce110_tg_set_overscan_color(struct timing_generator *tg,
void dce110_tg_program_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ const enum signal_type signal,
bool use_vbios)
{
if (use_vbios)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index 734d4965dab1..768ccf27ada9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -256,6 +256,11 @@ void dce110_tg_set_overscan_color(struct timing_generator *tg,
void dce110_tg_program_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ const enum signal_type signal,
bool use_vbios);
bool dce110_tg_is_blanked(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index a3cef60380ed..a13a2f58944e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -435,6 +435,11 @@ static void dce110_timing_generator_v_set_blank(struct timing_generator *tg,
static void dce110_timing_generator_v_program_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ const enum signal_type signal,
bool use_vbios)
{
if (use_vbios)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index aa8d6b10d2c3..b1aaab5590cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/delay.h>
+
#include "dce110_transform_v.h"
#include "dm_services.h"
#include "dc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
index faae12cf7968..51cb45d8b9ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/delay.h>
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dce/dce_11_2_d.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index a480b15f6885..c6136e0ed1a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "link_encoder.h"
@@ -34,8 +36,6 @@
#include "dce110/dce110_timing_generator.h"
#include "irq/dce110/irq_service_dce110.h"
-
-#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_transform.h"
#include "dce/dce_link_encoder.h"
@@ -148,19 +148,6 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
-
-static const struct clk_mgr_registers disp_clk_regs = {
- CLK_COMMON_REG_LIST_DCE_BASE()
-};
-
-static const struct clk_mgr_shift disp_clk_shift = {
- CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
-};
-
-static const struct clk_mgr_mask disp_clk_mask = {
- CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
-};
-
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE110_COMMON_REG_LIST()
};
@@ -774,9 +761,6 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.clk_mgr != NULL)
- dce_clk_mgr_destroy(&pool->base.clk_mgr);
-
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
@@ -993,7 +977,8 @@ static const struct resource_funcs dce112_res_pool_funcs = {
.validate_bandwidth = dce112_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce112_add_stream_to_ctx,
- .validate_global = dce112_validate_global
+ .validate_global = dce112_validate_global,
+ .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link
};
static void bw_calcs_data_update_from_pplib(struct dc *dc)
@@ -1224,16 +1209,6 @@ static bool construct(
}
}
- pool->base.clk_mgr = dce112_clk_mgr_create(ctx,
- &disp_clk_regs,
- &disp_clk_shift,
- &disp_clk_mask);
- if (pool->base.clk_mgr == NULL) {
- dm_error("DC: failed to create display clock!\n");
- BREAK_TO_DEBUGGER();
- goto res_create_fail;
- }
-
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 6d49c7143c67..54be7ab370df 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -24,6 +24,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
@@ -46,8 +48,7 @@
#include "dce110/dce110_hw_sequencer.h"
#include "dce120/dce120_hw_sequencer.h"
#include "dce/dce_transform.h"
-
-#include "dce/dce_clk_mgr.h"
+#include "clk_mgr.h"
#include "dce/dce_audio.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
@@ -480,7 +481,7 @@ static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
-struct clock_source *dce120_clock_source_create(
+static struct clock_source *dce120_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -503,14 +504,14 @@ struct clock_source *dce120_clock_source_create(
return NULL;
}
-void dce120_clock_source_destroy(struct clock_source **clk_src)
+static void dce120_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
}
-bool dce120_hw_sequencer_create(struct dc *dc)
+static bool dce120_hw_sequencer_create(struct dc *dc)
{
/* All registers used by dce11.2 match those in dce11 in offset and
* structure
@@ -609,9 +610,6 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
-
- if (pool->base.clk_mgr != NULL)
- dce_clk_mgr_destroy(&pool->base.clk_mgr);
}
static void read_dce_straps(
@@ -837,7 +835,8 @@ static const struct resource_funcs dce120_res_pool_funcs = {
.link_enc_create = dce120_link_encoder_create,
.validate_bandwidth = dce112_validate_bandwidth,
.validate_plane = dce100_validate_plane,
- .add_stream_to_ctx = dce112_add_stream_to_ctx
+ .add_stream_to_ctx = dce112_add_stream_to_ctx,
+ .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link
};
static void bw_calcs_data_update_from_pplib(struct dc *dc)
@@ -1047,17 +1046,6 @@ static bool construct(
}
}
- if (is_vg20)
- pool->base.clk_mgr = dce121_clk_mgr_create(ctx);
- else
- pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
-
- if (pool->base.clk_mgr == NULL) {
- dm_error("DC: failed to create display clock!\n");
- BREAK_TO_DEBUGGER();
- goto dccg_create_fail;
- }
-
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
@@ -1185,7 +1173,7 @@ static bool construct(
* here.
*/
if (is_vg20 && dce121_xgmi_enabled(dc->hwseq))
- dce121_clock_patch_xgmi_ss_info(pool->base.clk_mgr);
+ dce121_clock_patch_xgmi_ss_info(dc->clk_mgr);
/* Create hardware sequencer */
if (!dce120_hw_sequencer_create(dc))
@@ -1204,7 +1192,6 @@ static bool construct(
irqs_create_fail:
controller_create_fail:
-dccg_create_fail:
clk_src_create_fail:
res_create_fail:
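A hedged note on the recurring deletions above: clock-manager ownership moves from each resource pool to the dc struct, so the per-ASIC construct()/destruct() paths no longer create or destroy it. The dc core is assumed to do this once instead (function names and arguments are an assumption, not shown in this diff):

	dc->clk_mgr = dc_clk_mgr_create(ctx, pp_smu, dccg);
	...
	dc_clk_mgr_destroy(&dc->clk_mgr);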
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 04b866f0fa1f..098e56962f2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -734,8 +734,13 @@ void dce120_tg_set_overscan_color(struct timing_generator *tg,
CRTC_OVERSCAN_COLOR_RED, overscan_color->color_r_cr);
}
-void dce120_tg_program_timing(struct timing_generator *tg,
+static void dce120_tg_program_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ const enum signal_type signal,
bool use_vbios)
{
if (use_vbios)
@@ -1109,6 +1114,92 @@ static bool dce120_arm_vert_intr(
return true;
}
+
+static bool dce120_is_tg_enabled(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value, field;
+
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CONTROL,
+ tg110->offsets.crtc);
+ field = get_reg_field_value(value, CRTC0_CRTC_CONTROL,
+ CRTC_CURRENT_MASTER_EN_STATE);
+
+ return field == 1;
+}
+
+static bool dce120_configure_crc(struct timing_generator *tg,
+ const struct crc_params *params)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Cannot configure crc on a CRTC that is disabled */
+ if (!dce120_is_tg_enabled(tg))
+ return false;
+
+ /* First, disable CRC before we configure it. */
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL,
+ tg110->offsets.crtc, 0);
+
+ if (!params->enable)
+ return true;
+
+ /* Program frame boundaries */
+ /* Window A x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ CRTC_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ CRTC_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ CRTC_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ CRTC_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set CRC mode and selection, and enable. Only using CRC0. */
+ CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
+ CRTC_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ CRTC_CRC0_SELECT, params->selection,
+ CRTC_CRC_EN, 1);
+
+ return true;
+}
+
+static bool dce120_get_crc(struct timing_generator *tg, uint32_t *r_cr,
+ uint32_t *g_y, uint32_t *b_cb)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value, field;
+
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL,
+ tg110->offsets.crtc);
+ field = get_reg_field_value(value, CRTC0_CRTC_CRC_CNTL, CRTC_CRC_EN);
+
+ /* Early return if CRC is not enabled for this CRTC */
+ if (!field)
+ return false;
+
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_RG,
+ tg110->offsets.crtc);
+ *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_R_CR);
+ *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_G_Y);
+
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_B,
+ tg110->offsets.crtc);
+ *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_B, CRC0_B_CB);
+
+ return true;
+}
+
static const struct timing_generator_funcs dce120_tg_funcs = {
.validate_timing = dce120_tg_validate_timing,
.program_timing = dce120_tg_program_timing,
@@ -1140,6 +1231,9 @@ static const struct timing_generator_funcs dce120_tg_funcs = {
.set_static_screen_control = dce120_timing_generator_set_static_screen_control,
.set_test_pattern = dce120_timing_generator_set_test_pattern,
.arm_vert_intr = dce120_arm_vert_intr,
+ .is_tg_enabled = dce120_is_tg_enabled,
+ .configure_crc = dce120_configure_crc,
+ .get_crc = dce120_get_crc,
};
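A hedged sketch (not from this patch) of a consumer of the three new hooks; the helper name and the CRC_SELECTION_ACTIVE_ONLY value are illustrative assumptions:

	static bool capture_frame_crc(struct timing_generator *tg,
			uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
	{
		struct crc_params params = { 0 };

		params.enable = true;
		params.continuous_mode = true;	/* latch a fresh CRC each frame */
		params.selection = CRC_SELECTION_ACTIVE_ONLY;

		/* configure_crc() bails out if the CRTC is disabled */
		if (!tg->funcs->configure_crc(tg, &params))
			return false;

		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	}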
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 27d0cc394963..860a524ebcfa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
@@ -37,7 +39,6 @@
#include "dce110/dce110_timing_generator.h"
#include "dce110/dce110_resource.h"
#include "dce80/dce80_timing_generator.h"
-#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
@@ -154,19 +155,6 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
-
-static const struct clk_mgr_registers disp_clk_regs = {
- CLK_COMMON_REG_LIST_DCE_BASE()
-};
-
-static const struct clk_mgr_shift disp_clk_shift = {
- CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
-};
-
-static const struct clk_mgr_mask disp_clk_mask = {
- CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
-};
-
#define ipp_regs(id)\
[id] = {\
IPP_COMMON_REG_LIST_DCE_BASE(id)\
@@ -802,9 +790,6 @@ static void destruct(struct dce110_resource_pool *pool)
}
}
- if (pool->base.clk_mgr != NULL)
- dce_clk_mgr_destroy(&pool->base.clk_mgr);
-
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
@@ -880,7 +865,8 @@ static const struct resource_funcs dce80_res_pool_funcs = {
.validate_bandwidth = dce80_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
- .validate_global = dce80_validate_global
+ .validate_global = dce80_validate_global,
+ .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
static bool dce80_construct(
@@ -954,16 +940,6 @@ static bool dce80_construct(
}
}
- pool->base.clk_mgr = dce_clk_mgr_create(ctx,
- &disp_clk_regs,
- &disp_clk_shift,
- &disp_clk_mask);
- if (pool->base.clk_mgr == NULL) {
- dm_error("DC: failed to create display clock!\n");
- BREAK_TO_DEBUGGER();
- goto res_create_fail;
- }
-
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
@@ -1163,16 +1139,6 @@ static bool dce81_construct(
}
}
- pool->base.clk_mgr = dce_clk_mgr_create(ctx,
- &disp_clk_regs,
- &disp_clk_shift,
- &disp_clk_mask);
- if (pool->base.clk_mgr == NULL) {
- dm_error("DC: failed to create display clock!\n");
- BREAK_TO_DEBUGGER();
- goto res_create_fail;
- }
-
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
@@ -1368,16 +1334,6 @@ static bool dce83_construct(
}
}
- pool->base.clk_mgr = dce_clk_mgr_create(ctx,
- &disp_clk_regs,
- &disp_clk_shift,
- &disp_clk_mask);
- if (pool->base.clk_mgr == NULL) {
- dm_error("DC: failed to create display clock!\n");
- BREAK_TO_DEBUGGER();
- goto res_create_fail;
- }
-
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 8b5ce557ee71..397e7f94e1e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -107,12 +107,17 @@ static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz)
static void program_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ const enum signal_type signal,
bool use_vbios)
{
if (!use_vbios)
program_pix_dur(tg, timing->pix_clk_100hz);
- dce110_tg_program_timing(tg, timing, use_vbios);
+ dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios);
}
static void dce80_timing_generator_enable_advanced_request(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 55f293c8a3c0..032f872be89c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -24,7 +24,7 @@
DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
- dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \
+ dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 5ae4d69391a5..3b8cd7410498 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -38,6 +38,22 @@
type exp_resion_start_segment;\
type field_region_linear_slope
+#define TF_HELPER_REG_LIST \
+ uint32_t start_cntl_b; \
+ uint32_t start_cntl_g; \
+ uint32_t start_cntl_r; \
+ uint32_t start_slope_cntl_b; \
+ uint32_t start_slope_cntl_g; \
+ uint32_t start_slope_cntl_r; \
+ uint32_t start_end_cntl1_b; \
+ uint32_t start_end_cntl2_b; \
+ uint32_t start_end_cntl1_g; \
+ uint32_t start_end_cntl2_g; \
+ uint32_t start_end_cntl1_r; \
+ uint32_t start_end_cntl2_r; \
+ uint32_t region_start; \
+ uint32_t region_end
+
#define TF_CM_REG_FIELD_LIST(type) \
type csc_c11; \
type csc_c12
@@ -54,20 +70,7 @@ struct xfer_func_reg {
struct xfer_func_shift shifts;
struct xfer_func_mask masks;
- uint32_t start_cntl_b;
- uint32_t start_cntl_g;
- uint32_t start_cntl_r;
- uint32_t start_slope_cntl_b;
- uint32_t start_slope_cntl_g;
- uint32_t start_slope_cntl_r;
- uint32_t start_end_cntl1_b;
- uint32_t start_end_cntl2_b;
- uint32_t start_end_cntl1_g;
- uint32_t start_end_cntl2_g;
- uint32_t start_end_cntl1_r;
- uint32_t start_end_cntl2_r;
- uint32_t region_start;
- uint32_t region_end;
+ TF_HELPER_REG_LIST;
};
struct cm_color_matrix_shift {
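The point of hoisting the fourteen fields into TF_HELPER_REG_LIST is reuse: a later register struct can splice in the same block instead of repeating them. A hedged, hypothetical example:

	struct hypothetical_tf_reg {
		struct xfer_func_shift shifts;
		struct xfer_func_mask masks;
		TF_HELPER_REG_LIST;
	};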
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 0db2a6e96fc0..a1c824efa686 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/delay.h>
+
#include "dm_services.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
@@ -263,20 +265,15 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
-void hubbub1_program_watermarks(
+void hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
- /*
- * Need to clamp to max of the register values (i.e. no wrap)
- * for dcn1, all wm registers are 21-bit wide
- */
uint32_t prog_wm_value;
-
/* Repeat for watermark sets A, B, C and D. */
/* clock state A */
if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
@@ -291,60 +288,14 @@ void hubbub1_program_watermarks(
watermarks->a.urgent_ns, prog_wm_value);
}
- if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A)) {
- if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
- hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
- prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.pte_meta_urgent_ns, prog_wm_value);
- }
- }
-
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
- if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
- > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
- hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
-
- if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
- > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
- hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
- watermarks->a.cstate_pstate.cstate_exit_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
- }
-
- if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
- > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
- hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
- watermarks->a.cstate_pstate.pstate_change_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->a.cstate_pstate.pstate_change_ns,
+ if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
+ hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
- DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.pte_meta_urgent_ns, prog_wm_value);
}
/* clock state B */
@@ -360,60 +311,14 @@ void hubbub1_program_watermarks(
watermarks->b.urgent_ns, prog_wm_value);
}
- if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B)) {
- if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
- hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
- prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.pte_meta_urgent_ns, prog_wm_value);
- }
- }
-
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
- if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
- > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
- hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
-
- if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
- > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
- hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
- watermarks->b.cstate_pstate.cstate_exit_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
- }
-
- if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
- > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
- hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
- watermarks->b.cstate_pstate.pstate_change_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->b.cstate_pstate.pstate_change_ns,
+ if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
+ hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
- DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.pte_meta_urgent_ns, prog_wm_value);
}
/* clock state C */
@@ -429,60 +334,14 @@ void hubbub1_program_watermarks(
watermarks->c.urgent_ns, prog_wm_value);
}
- if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C)) {
- if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
- hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
- prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.pte_meta_urgent_ns, prog_wm_value);
- }
- }
-
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
- if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
- > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
- hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
-
- if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
- > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
- hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
- watermarks->c.cstate_pstate.cstate_exit_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
- }
-
- if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
- > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
- hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
- watermarks->c.cstate_pstate.pstate_change_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->c.cstate_pstate.pstate_change_ns,
+ if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
+ hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
- DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
- "HW register value = 0x%x\n\n",
- watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.pte_meta_urgent_ns, prog_wm_value);
}
/* clock state D */
@@ -498,48 +357,199 @@ void hubbub1_program_watermarks(
watermarks->d.urgent_ns, prog_wm_value);
}
- if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)) {
- if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
- hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
- prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
- refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.pte_meta_urgent_ns, prog_wm_value);
- }
+ if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
+ hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.pte_meta_urgent_ns, prog_wm_value);
}
+}
- if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
- if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
- > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
- hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
- DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+void hubbub1_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
- if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
- > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
- hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
- watermarks->d.cstate_pstate.cstate_exit_ns;
- prog_wm_value = convert_and_clamp(
- watermarks->d.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
- REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
- DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
- DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
- "HW register value = 0x%x\n",
- watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ /* clock state A */
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
+ > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
+ hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
+ watermarks->a.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ /* clock state B */
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
+ > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
+ hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
+ watermarks->b.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ /* clock state C */
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
+ > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
+ hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
+ watermarks->c.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ /* clock state D */
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
+ hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+ }
+
+ if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
+ > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
+ hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
+ watermarks->d.cstate_pstate.cstate_exit_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+}
+
+void hubbub1_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+
+ /* clock state A */
+ if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
+ > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
+ hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
+ watermarks->a.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
}
+ /* clock state B */
+ if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
+ > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
+ hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
+ watermarks->b.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+
+ /* clock state C */
+ if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
+ > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
+ hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
+ watermarks->c.cstate_pstate.pstate_change_ns;
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
+ }
+
+ /* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
@@ -553,6 +563,22 @@ void hubbub1_program_watermarks(
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
}
+}
+
+void hubbub1_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+ /*
+ * Need to clamp to max of the register values (i.e. no wrap)
+ * for dcn1, all wm registers are 21-bit wide
+ */
+ hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
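A hedged sketch of the conversion all three split helpers rely on — nanoseconds to refclk ticks, clamped so the 21-bit watermark fields never wrap. The real convert_and_clamp() is defined earlier in dcn10_hubbub.c; this reconstruction is an approximation:

	static uint32_t convert_and_clamp(uint32_t wm_ns,
			uint32_t refclk_mhz, uint32_t clamp_value)
	{
		uint32_t ret_val;

		ret_val = wm_ns * refclk_mhz / 1000;	/* ns -> refclk ticks */
		if (ret_val > clamp_value)		/* 0x1fffff = 21-bit max */
			ret_val = clamp_value;

		return ret_val;
	}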
@@ -903,9 +929,7 @@ void hubbub1_construct(struct hubbub *hubbub,
hubbub1->masks = hubbub_mask;
hubbub1->debug_test_index_pstate = 0x7;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
if (ctx->dce_version == DCN_VERSION_1_01)
hubbub1->debug_test_index_pstate = 0xB;
-#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 85811b24a497..7c2559c9ae23 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -262,4 +262,20 @@ void hubbub1_construct(struct hubbub *hubbub,
const struct dcn_hubbub_shift *hubbub_shift,
const struct dcn_hubbub_mask *hubbub_mask);
+void hubbub1_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub1_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub1_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 33d311cea28c..821a280eb481 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,6 +45,8 @@
#include "dcn10_cm_common.h"
#include "dc_link_dp.h"
#include "dccg.h"
+#include "clk_mgr.h"
+
#define DC_LOGGER_INIT(logger)
@@ -658,16 +660,15 @@ static enum dc_status dcn10_enable_stream_timing(
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
}
- pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
- pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
- pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_offset = pipe_ctx->pipe_dlg_param.vupdate_offset;
- pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_width = pipe_ctx->pipe_dlg_param.vupdate_width;
-
- pipe_ctx->stream_res.tg->dlg_otg_param.signal = pipe_ctx->stream->signal;
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
+ pipe_ctx->pipe_dlg_param.vready_offset,
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->stream->signal,
true);
#if 0 /* move to after enable_crtc */
@@ -1101,9 +1102,6 @@ static void dcn10_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
- if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
- dc->hwss.edp_power_control(link, true);
-
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
@@ -1145,6 +1143,9 @@ static void dcn10_init_hw(struct dc *dc)
if (dmcu != NULL)
dmcu->funcs->dmcu_init(dmcu);
+ if (abm != NULL && dmcu != NULL)
+ abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
@@ -1159,7 +1160,7 @@ static void dcn10_init_hw(struct dc *dc)
enable_power_gating_plane(dc->hwseq, true);
- memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
+ memset(&dc->clk_mgr->clks, 0, sizeof(dc->clk_mgr->clks));
}
static void dcn10_reset_hw_ctx_wrap(
@@ -1756,7 +1757,7 @@ static void dcn10_program_output_csc(struct dc *dc,
bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
- if (pipe_ctx->plane_state->visible)
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
return true;
@@ -1765,7 +1766,7 @@ bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
- if (pipe_ctx->plane_state->visible)
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
return true;
@@ -1774,7 +1775,7 @@ bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
- if (pipe_ctx->plane_state->visible)
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
return true;
if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
return true;
@@ -1920,7 +1921,7 @@ static uint16_t fixed_point_to_int_frac(
return result;
}
-void build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
+void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
const struct dc_plane_state *plane_state)
{
if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
@@ -1953,7 +1954,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
plane_state->color_space);
//set scale and bias registers
- build_prescale_params(&bns_params, plane_state);
+ dcn10_build_prescale_params(&bns_params, plane_state);
if (dpp->funcs->dpp_program_bias_and_scale)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
@@ -2071,7 +2072,7 @@ void update_dchubp_dpp(
*/
if (plane_state->update_flags.bits.full_update) {
bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
- dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
+ dc->clk_mgr->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
dpp,
@@ -2084,9 +2085,9 @@ void update_dchubp_dpp(
dpp->inst,
pipe_ctx->plane_res.bw.dppclk_khz);
else
- dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
- dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
- dc->res_pool->clk_mgr->clks.dispclk_khz;
+ dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
+ dc->clk_mgr->clks.dispclk_khz / 2 :
+ dc->clk_mgr->clks.dispclk_khz;
}
/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -2279,14 +2280,15 @@ static void program_all_pipe_in_tree(
if (pipe_ctx->top_pipe == NULL) {
bool blank = !is_pipe_tree_visible(pipe_ctx);
- pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
- pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
- pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_offset = pipe_ctx->pipe_dlg_param.vupdate_offset;
- pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_width = pipe_ctx->pipe_dlg_param.vupdate_width;
- pipe_ctx->stream_res.tg->dlg_otg_param.signal = pipe_ctx->stream->signal;
-
pipe_ctx->stream_res.tg->funcs->program_global_sync(
- pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg,
+ pipe_ctx->pipe_dlg_param.vready_offset,
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
@@ -2448,8 +2450,8 @@ static void dcn10_prepare_bandwidth(
if (context->stream_count == 0)
context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
- dc->res_pool->clk_mgr->funcs->update_clocks(
- dc->res_pool->clk_mgr,
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
context,
false);
}
@@ -2480,8 +2482,8 @@ static void dcn10_optimize_bandwidth(
if (context->stream_count == 0)
context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
- dc->res_pool->clk_mgr->funcs->update_clocks(
- dc->res_pool->clk_mgr,
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
context,
true);
}
@@ -2504,8 +2506,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
{
int i = 0;
struct drr_params params = {0};
- // DRR should set trigger event to monitor surface update event
- unsigned int event_triggers = 0x80;
+ // For DRR, map the trigger event to OTG_TRIG_A (bit 11) to use manual flow control
+ unsigned int event_triggers = 0x800;
params.vertical_total_max = vmax;
params.vertical_total_min = vmin;
@@ -2644,9 +2646,6 @@ static void dcn10_wait_for_mpcc_disconnect(
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);
- /*DC_LOG_ERROR(dc->ctx->logger,
- "[debug_mpo: wait_for_mpcc finished waiting on mpcc %d]\n",
- i);*/
}
}
@@ -2790,7 +2789,6 @@ static void apply_front_porch_workaround(
int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
{
- struct timing_generator *optc = pipe_ctx->stream_res.tg;
const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
struct dc_crtc_timing patched_crtc_timing;
int vesa_sync_start;
@@ -2813,7 +2811,7 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
* interlace_factor;
vertical_line_start = asic_blank_end -
- optc->dlg_otg_param.vstartup_start + 1;
+ pipe_ctx->pipe_dlg_param.vstartup_start + 1;
return vertical_line_start;
}
@@ -2961,6 +2959,18 @@ static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
}
}
+static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size)
+{
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
+ pipe_ctx->stream_res.stream_enc,
+ custom_sdp_message,
+ sdp_message_size);
+ }
+}
+
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.init_hw = dcn10_init_hw,
@@ -2980,6 +2990,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.enable_timing_synchronization = dcn10_enable_timing_synchronization,
.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
.update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
.enable_stream = dce110_enable_stream,
.disable_stream = dce110_disable_stream,
.unblank_stream = dcn10_unblank_stream,
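A hedged usage sketch for the new hwss hook (not from this diff; sdp_data and its size are illustrative, and the hook itself already filters out non-DP signals):

	/* caller-built SDP payload: HB0..HB3 header plus data bytes */
	uint8_t sdp_data[32] = { 0 };

	dc->hwss.send_immediate_sdp_message(pipe_ctx, sdp_data, sizeof(sdp_data));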
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 4b3b27a5d23b..ef94d6b15843 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -83,6 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream(
int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale,
+ const struct dc_plane_state *plane_state);
void lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 991622da9ed5..6e47444109d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -43,7 +43,7 @@
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
-#include "dcn10_clk_mgr.h"
+#include "clk_mgr.h"
static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
index 08db1e6b5166..0e0c6850247d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dcn10_ipp.h"
#include "reg_helper.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 0126a44ba012..0a520591fd3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/delay.h>
+#include <linux/slab.h>
+
#include "reg_helper.h"
#include "core_types.h"
@@ -726,6 +729,8 @@ void dcn10_link_encoder_construct(
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
} else {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
@@ -1357,5 +1362,5 @@ void dcn10_aux_initialize(struct dcn10_link_encoder *enc10)
/* 1/4 window (the maximum allowed) */
AUX_REG_UPDATE(AUX_DPHY_RX_CONTROL0,
- AUX_RX_RECEIVE_WINDOW, 1);
+ AUX_RX_RECEIVE_WINDOW, 0);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index ab958cff3b76..cec69cecf521 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dcn10_opp.h"
#include "reg_helper.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 0345d51e9d6f..e4b850a2d31f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -46,9 +46,7 @@
* This is a workaround for a bug that has existed since R5xx and has not been
* fixed: keep the front porch at a minimum of 2 for interlaced modes or 1 for progressive.
*/
-static void optc1_apply_front_porch_workaround(
- struct timing_generator *optc,
- struct dc_crtc_timing *timing)
+static void apply_front_porch_workaround(struct dc_crtc_timing *timing)
{
if (timing->flags.INTERLACE == 1) {
if (timing->v_front_porch < 2)
@@ -60,24 +58,33 @@ static void optc1_apply_front_porch_workaround(
}
void optc1_program_global_sync(
- struct timing_generator *optc)
+ struct timing_generator *optc,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- if (optc->dlg_otg_param.vstartup_start == 0) {
+ optc1->vready_offset = vready_offset;
+ optc1->vstartup_start = vstartup_start;
+ optc1->vupdate_offset = vupdate_offset;
+ optc1->vupdate_width = vupdate_width;
+
+ if (optc1->vstartup_start == 0) {
BREAK_TO_DEBUGGER();
return;
}
REG_SET(OTG_VSTARTUP_PARAM, 0,
- VSTARTUP_START, optc->dlg_otg_param.vstartup_start);
+ VSTARTUP_START, optc1->vstartup_start);
REG_SET_2(OTG_VUPDATE_PARAM, 0,
- VUPDATE_OFFSET, optc->dlg_otg_param.vupdate_offset,
- VUPDATE_WIDTH, optc->dlg_otg_param.vupdate_width);
+ VUPDATE_OFFSET, optc1->vupdate_offset,
+ VUPDATE_WIDTH, optc1->vupdate_width);
REG_SET(OTG_VREADY_PARAM, 0,
- VREADY_OFFSET, optc->dlg_otg_param.vready_offset);
+ VREADY_OFFSET, optc1->vready_offset);
}
static void optc1_disable_stereo(struct timing_generator *optc)
@@ -132,25 +139,32 @@ void optc1_setup_vertical_interrupt2(
void optc1_program_timing(
struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ const enum signal_type signal,
bool use_vbios)
{
struct dc_crtc_timing patched_crtc_timing;
- uint32_t vesa_sync_start;
uint32_t asic_blank_end;
uint32_t asic_blank_start;
uint32_t v_total;
uint32_t v_sync_end;
- uint32_t v_init, v_fp2;
uint32_t h_sync_polarity, v_sync_polarity;
uint32_t start_point = 0;
uint32_t field_num = 0;
uint32_t h_div_2;
- int32_t vertical_line_start;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ optc1->signal = signal;
+ optc1->vready_offset = vready_offset;
+ optc1->vstartup_start = vstartup_start;
+ optc1->vupdate_offset = vupdate_offset;
+ optc1->vupdate_width = vupdate_width;
patched_crtc_timing = *dc_crtc_timing;
- optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
+ apply_front_porch_workaround(&patched_crtc_timing);
/* Load horizontal timing */
@@ -163,24 +177,16 @@ void optc1_program_timing(
OTG_H_SYNC_A_START, 0,
OTG_H_SYNC_A_END, patched_crtc_timing.h_sync_width);
- /* asic_h_blank_end = HsyncWidth + HbackPorch =
- * vesa. usHorizontalTotal - vesa. usHorizontalSyncStart -
- * vesa.h_left_border
- */
- vesa_sync_start = patched_crtc_timing.h_addressable +
- patched_crtc_timing.h_border_right +
+ /* blank_start = line end - front porch */
+ asic_blank_start = patched_crtc_timing.h_total -
patched_crtc_timing.h_front_porch;
- asic_blank_end = patched_crtc_timing.h_total -
- vesa_sync_start -
+ /* blank_end = blank_start - active */
+ asic_blank_end = asic_blank_start -
+ patched_crtc_timing.h_border_right -
+ patched_crtc_timing.h_addressable -
patched_crtc_timing.h_border_left;
- /* h_blank_start = v_blank_end + v_active */
- asic_blank_start = asic_blank_end +
- patched_crtc_timing.h_border_left +
- patched_crtc_timing.h_addressable +
- patched_crtc_timing.h_border_right;
-
REG_UPDATE_2(OTG_H_BLANK_START_END,
OTG_H_BLANK_START, asic_blank_start,
OTG_H_BLANK_END, asic_blank_end);
@@ -212,24 +218,15 @@ void optc1_program_timing(
OTG_V_SYNC_A_START, 0,
OTG_V_SYNC_A_END, v_sync_end);
- vesa_sync_start = patched_crtc_timing.v_addressable +
- patched_crtc_timing.v_border_bottom +
+ /* blank_start = frame end - front porch */
+ asic_blank_start = patched_crtc_timing.v_total -
patched_crtc_timing.v_front_porch;
- asic_blank_end = (patched_crtc_timing.v_total -
- vesa_sync_start -
- patched_crtc_timing.v_border_top);
-
- /* v_blank_start = v_blank_end + v_active */
- asic_blank_start = asic_blank_end +
- (patched_crtc_timing.v_border_top +
- patched_crtc_timing.v_addressable +
- patched_crtc_timing.v_border_bottom);
-
- vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
- v_fp2 = 0;
- if (vertical_line_start < 0)
- v_fp2 = -vertical_line_start;
+ /* blank_end = blank_start - active */
+ asic_blank_end = asic_blank_start -
+ patched_crtc_timing.v_border_bottom -
+ patched_crtc_timing.v_addressable -
+ patched_crtc_timing.v_border_top;
REG_UPDATE_2(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, asic_blank_start,
@@ -242,10 +239,9 @@ void optc1_program_timing(
REG_UPDATE(OTG_V_SYNC_A_CNTL,
OTG_V_SYNC_A_POL, v_sync_polarity);
- v_init = asic_blank_start;
- if (optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
- optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
- optc->dlg_otg_param.signal == SIGNAL_TYPE_EDP) {
+ if (optc1->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ optc1->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ optc1->signal == SIGNAL_TYPE_EDP) {
start_point = 1;
if (patched_crtc_timing.flags.INTERLACE == 1)
field_num = 1;
@@ -253,13 +249,10 @@ void optc1_program_timing(
/* Interlace */
if (REG(OTG_INTERLACE_CONTROL)) {
- if (patched_crtc_timing.flags.INTERLACE == 1) {
+ if (patched_crtc_timing.flags.INTERLACE == 1)
REG_UPDATE(OTG_INTERLACE_CONTROL,
OTG_INTERLACE_ENABLE, 1);
- v_init = v_init / 2;
- if ((optc->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
- v_fp2 = v_fp2 / 2;
- } else
+ else
REG_UPDATE(OTG_INTERLACE_CONTROL,
OTG_INTERLACE_ENABLE, 0);
}
@@ -268,16 +261,18 @@ void optc1_program_timing(
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
- REG_UPDATE_2(CONTROL,
- VTG0_FP2, v_fp2,
- VTG0_VCOUNT_INIT, v_init);
-
/* original code is using VTG offset to address OTG reg, seems wrong */
REG_UPDATE_2(OTG_CONTROL,
OTG_START_POINT_CNTL, start_point,
OTG_FIELD_NUMBER_CNTL, field_num);
- optc1_program_global_sync(optc);
+ optc->funcs->program_global_sync(optc,
+ vready_offset,
+ vstartup_start,
+ vupdate_offset,
+ vupdate_width);
+
+ optc->funcs->set_vtg_params(optc, dc_crtc_timing);
/* TODO
* patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
@@ -296,6 +291,48 @@ void optc1_program_timing(
}
+void optc1_set_vtg_params(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t asic_blank_end;
+ uint32_t v_init;
+ uint32_t v_fp2 = 0;
+ int32_t vertical_line_start;
+
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ patched_crtc_timing = *dc_crtc_timing;
+ apply_front_porch_workaround(&patched_crtc_timing);
+
+ /* VCOUNT_INIT is the start of blank */
+ v_init = patched_crtc_timing.v_total - patched_crtc_timing.v_front_porch;
+
+ /* end of blank = v_init - active */
+ asic_blank_end = v_init -
+ patched_crtc_timing.v_border_bottom -
+ patched_crtc_timing.v_addressable -
+ patched_crtc_timing.v_border_top;
+
+ /* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
+ vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
+ if (vertical_line_start < 0)
+ v_fp2 = -vertical_line_start;
+
+ /* Interlace */
+ if (REG(OTG_INTERLACE_CONTROL)) {
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ v_init = v_init / 2;
+ if ((optc1->vstartup_start/2)*2 > asic_blank_end)
+ v_fp2 = v_fp2 / 2;
+ }
+ }
+
+ REG_UPDATE_2(CONTROL,
+ VTG0_FP2, v_fp2,
+ VTG0_VCOUNT_INIT, v_init);
+}
+
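For illustration, plugging an assumed 1125-line vertical total, 4-line front porch, 1080 active lines and a hypothetical vstartup_start of 84 into the math above (none of these values come from the patch):

	uint32_t v_init = 1125 - 4;                /* 1121: start of blank */
	uint32_t asic_blank_end = v_init - 1080;   /* 41: end of blank */
	int32_t vertical_line_start = asic_blank_end - 84 + 1;   /* -42 */
	uint32_t v_fp2 = vertical_line_start < 0 ? -vertical_line_start : 0;

	/* VSTARTUP lands 42 lines before VSYNC, so FP2 = 42 carries the offset. */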
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -754,6 +791,32 @@ void optc1_set_static_screen_control(
OTG_STATIC_SCREEN_FRAME_COUNT, 2);
}
+void optc1_setup_manual_trigger(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_GLOBAL_CONTROL2, 0,
+ MANUAL_FLOW_CONTROL_SEL, optc->inst);
+
+ REG_SET_8(OTG_TRIGA_CNTL, 0,
+ OTG_TRIGA_SOURCE_SELECT, 22,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
+ OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
+ OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
+ OTG_TRIGA_POLARITY_SELECT, 0,
+ OTG_TRIGA_FREQUENCY_SELECT, 0,
+ OTG_TRIGA_DELAY, 0,
+ OTG_TRIGA_CLEAR, 1);
+}
+
+void optc1_program_manual_trigger(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
+ MANUAL_FLOW_CONTROL, 1);
+}
+
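A rough sketch of how these two hooks pair up: setup_manual_trigger arms TRIG_A once when DRR is enabled (see the optc1_set_drr hunk below), after which program_manual_trigger releases one frame per call. The caller below is hypothetical:

	/* Hypothetical per-frame caller; arming is done from optc1_set_drr(). */
	static void example_drr_release_frame(struct timing_generator *tg)
	{
		tg->funcs->program_manual_trigger(tg);
	}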
/**
*****************************************************************************
@@ -786,6 +849,10 @@ void optc1_set_drr(
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
OTG_SET_V_TOTAL_MIN_MASK, 0);
+
+ // Set up manual flow control for EOF via TRIG_A
+ optc->funcs->setup_manual_trigger(optc);
+
} else {
REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
OTG_SET_V_TOTAL_MIN_MASK, 0,
@@ -1420,6 +1487,9 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.clear_optc_underflow = optc1_clear_optc_underflow,
.get_crc = optc1_get_crc,
.configure_crc = optc1_configure_crc,
+ .set_vtg_params = optc1_set_vtg_params,
+ .program_manual_trigger = optc1_program_manual_trigger,
+ .setup_manual_trigger = optc1_setup_manual_trigger
};
void dcn10_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 4eb9a898c237..444c56c8104f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -84,13 +84,18 @@
SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
- SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst)
+ SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
+ SR(GSL_SOURCE_SELECT),\
+ SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
+ SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst)
+
#define TG_COMMON_REG_LIST_DCN1_0(inst) \
TG_COMMON_REG_LIST_DCN(inst),\
SRI(OTG_TEST_PATTERN_PARAMETERS, OTG, inst),\
SRI(OTG_TEST_PATTERN_CONTROL, OTG, inst),\
- SRI(OTG_TEST_PATTERN_COLOR, OTG, inst)
+ SRI(OTG_TEST_PATTERN_COLOR, OTG, inst),\
+ SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst)
struct dcn_optc_registers {
@@ -124,6 +129,8 @@ struct dcn_optc_registers {
uint32_t OTG_V_TOTAL_MIN;
uint32_t OTG_V_TOTAL_CONTROL;
uint32_t OTG_TRIGA_CNTL;
+ uint32_t OTG_TRIGA_MANUAL_TRIG;
+ uint32_t OTG_MANUAL_FLOW_CONTROL;
uint32_t OTG_FORCE_COUNT_NOW_CNTL;
uint32_t OTG_STATIC_SCREEN_CONTROL;
uint32_t OTG_STATUS_FRAME_COUNT;
@@ -156,6 +163,7 @@ struct dcn_optc_registers {
uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
+ uint32_t GSL_SOURCE_SELECT;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -213,6 +221,11 @@ struct dcn_optc_registers {
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\
+ SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\
SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\
SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\
SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\
@@ -266,8 +279,11 @@ struct dcn_optc_registers {
SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
- SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh)
-
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\
+ SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\
+ SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\
+ SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh)
#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
@@ -282,7 +298,8 @@ struct dcn_optc_registers {
SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_COLOR_FORMAT, mask_sh),\
SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_MASK, mask_sh),\
SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_DATA, mask_sh),\
- SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh)
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh),\
+ SF(OTG0_OTG_MANUAL_FLOW_CONTROL, MANUAL_FLOW_CONTROL, mask_sh)
#define TG_REG_FIELD_LIST_DCN1_0(type) \
type VSTARTUP_START;\
@@ -338,6 +355,11 @@ struct dcn_optc_registers {
type OTG_TRIGA_SOURCE_PIPE_SELECT;\
type OTG_TRIGA_RISING_EDGE_DETECT_CNTL;\
type OTG_TRIGA_FALLING_EDGE_DETECT_CNTL;\
+ type OTG_TRIGA_POLARITY_SELECT;\
+ type OTG_TRIGA_FREQUENCY_SELECT;\
+ type OTG_TRIGA_DELAY;\
+ type OTG_TRIGA_CLEAR;\
+ type OTG_TRIGA_MANUAL_TRIG;\
type OTG_STATIC_SCREEN_EVENT_MASK;\
type OTG_STATIC_SCREEN_FRAME_COUNT;\
type OTG_FRAME_COUNT;\
@@ -413,7 +435,12 @@ struct dcn_optc_registers {
type OTG_CRC0_WINDOWB_X_START;\
type OTG_CRC0_WINDOWB_X_END;\
type OTG_CRC0_WINDOWB_Y_START;\
- type OTG_CRC0_WINDOWB_Y_END;
+ type OTG_CRC0_WINDOWB_Y_END;\
+ type GSL0_READY_SOURCE_SEL;\
+ type GSL1_READY_SOURCE_SEL;\
+ type GSL2_READY_SOURCE_SEL;\
+ type MANUAL_FLOW_CONTROL;\
+ type MANUAL_FLOW_CONTROL_SEL;
#define TG_REG_FIELD_LIST(type) \
@@ -446,6 +473,12 @@ struct optc {
uint32_t min_v_sync_width;
uint32_t min_v_blank;
uint32_t min_v_blank_interlace;
+
+ int vstartup_start;
+ int vupdate_offset;
+ int vupdate_width;
+ int vready_offset;
+ enum signal_type signal;
};
void dcn10_timing_generator_init(struct optc *optc);
@@ -481,6 +514,11 @@ bool optc1_validate_timing(
void optc1_program_timing(
struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ const enum signal_type signal,
bool use_vbios);
void optc1_setup_vertical_interrupt0(
@@ -495,7 +533,11 @@ void optc1_setup_vertical_interrupt2(
uint32_t start_line);
void optc1_program_global_sync(
- struct timing_generator *optc);
+ struct timing_generator *optc,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width);
bool optc1_disable_crtc(struct timing_generator *optc);
@@ -582,4 +624,7 @@ bool optc1_get_crc(struct timing_generator *optc,
bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+void optc1_set_vtg_params(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing);
+
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 7eccb54c421d..3272030c82c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -23,13 +23,14 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dc.h"
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn10_resource.h"
-
#include "dcn10_ipp.h"
#include "dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
@@ -40,7 +41,6 @@
#include "dcn10_opp.h"
#include "dcn10_link_encoder.h"
#include "dcn10_stream_encoder.h"
-#include "dcn10_clk_mgr.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
@@ -153,9 +153,7 @@ enum dcn10_clk_src_array_id {
DCN10_CLK_SRC_PLL2,
DCN10_CLK_SRC_PLL3,
DCN10_CLK_SRC_TOTAL,
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3
-#endif
};
/* begin *********************
@@ -202,6 +200,7 @@ enum dcn10_clk_src_array_id {
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
+
/* macros to expend register list macro defined in HW object header file
* end *********************/
@@ -445,7 +444,6 @@ static const struct bios_registers bios_regs = {
HUBP_REG_LIST_DCN10(id)\
}
-
static const struct dcn_mi_registers hubp_regs[] = {
hubp_regs(0),
hubp_regs(1),
@@ -461,7 +459,6 @@ static const struct dcn_mi_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN10(_MASK)
};
-
static const struct dcn_hubbub_registers hubbub_reg = {
HUBBUB_REG_LIST_DCN10(0)
};
@@ -504,7 +501,6 @@ static const struct resource_caps res_cap = {
.num_ddc = 4,
};
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
static const struct resource_caps rv2_res_cap = {
.num_timing_generator = 3,
.num_opp = 3,
@@ -514,7 +510,6 @@ static const struct resource_caps rv2_res_cap = {
.num_pll = 3,
.num_ddc = 3,
};
-#endif
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
@@ -966,9 +961,6 @@ static void destruct(struct dcn10_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.clk_mgr != NULL)
- dce_clk_mgr_destroy(&pool->base.clk_mgr);
-
kfree(pool->base.pp_smu);
}
@@ -1217,6 +1209,38 @@ static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plan
return result;
}
+struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+ int j = -1;
+ struct dc_link *link = stream->link;
+
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (!res_ctx->is_stream_enc_acquired[i] &&
+ pool->stream_enc[i]) {
+ /* Store the first available encoder for the second
+ * MST display in a daisy-chain use case
+ */
+ j = i;
+ if (pool->stream_enc[i]->id ==
+ link->link_enc->preferred_engine)
+ return pool->stream_enc[i];
+ }
+ }
+
+ /*
+ * For CZ and later, we can allow DIG FE and BE to differ for all display types
+ */
+
+ if (j >= 0)
+ return pool->stream_enc[j];
+
+ return NULL;
+}
+
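A minimal sketch of how a resource-allocation path might consume the new hook, assuming a dc_state named context and a valid stream (the surrounding acquire/bookkeeping logic is omitted):

	struct stream_encoder *enc =
		pool->funcs->find_first_free_match_stream_enc_for_link(
			&context->res_ctx, pool, stream);
	if (enc == NULL)
		return false; /* no free DIG front end for this link */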
static const struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
@@ -1229,7 +1253,8 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
- .get_default_swizzle_mode = dcn10_get_default_swizzle_mode
+ .get_default_swizzle_mode = dcn10_get_default_swizzle_mode,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1252,11 +1277,9 @@ static bool construct(
ctx->dc_bios->regs = &bios_regs;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
if (ctx->dce_version == DCN_VERSION_1_01)
pool->base.res_cap = &rv2_res_cap;
else
-#endif
pool->base.res_cap = &res_cap;
pool->base.funcs = &dcn10_res_pool_funcs;
@@ -1273,10 +1296,8 @@ static bool construct(
/* max pipe num for ASIC before check pipe fuses */
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
if (dc->ctx->dce_version == DCN_VERSION_1_01)
pool->base.pipe_count = 3;
-#endif
dc->caps.max_video_width = 3840;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
@@ -1309,26 +1330,17 @@ static bool construct(
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
-#ifdef CONFIG_DRM_AMD_DC_DCN1_01
if (dc->ctx->dce_version == DCN_VERSION_1_0) {
pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
dcn10_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
}
-#else
- pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
- dcn10_clock_source_create(ctx, ctx->dc_bios,
- CLOCK_SOURCE_COMBO_PHY_PLL3,
- &clk_src_regs[3], false);
-#endif
pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
if (dc->ctx->dce_version == DCN_VERSION_1_01)
pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL;
-#endif
pool->base.dp_clock_source =
dcn10_clock_source_create(ctx, ctx->dc_bios,
@@ -1343,12 +1355,6 @@ static bool construct(
goto fail;
}
}
- pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
- if (pool->base.clk_mgr == NULL) {
- dm_error("DC: failed to create display clock!\n");
- BREAK_TO_DEBUGGER();
- goto fail;
- }
pool->base.dmcu = dcn10_dmcu_create(ctx,
&dmcu_regs,
@@ -1374,7 +1380,6 @@ static bool construct(
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
if (dc->ctx->dce_version == DCN_VERSION_1_01) {
struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc;
struct dcn_ip_params *dcn_ip = dc->dcn_ip;
@@ -1385,7 +1390,6 @@ static bool construct(
dcn_soc->dram_clock_change_latency = 23;
dcn_ip->max_num_dpp = 3;
}
-#endif
if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
dc->dcn_soc->urgent_latency = 3;
dc->debug.disable_dmcu = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
index 999c684a0b36..633025ccb870 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
@@ -42,6 +42,11 @@ struct resource_pool *dcn10_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
+struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream);
+
#endif /* __DC_RESOURCE_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 8ee9f6dc1d62..b9ffbf6b58ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -23,6 +23,7 @@
*
*/
+#include <linux/delay.h>
#include "dc_bios_types.h"
#include "dcn10_stream_encoder.h"
@@ -415,6 +416,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_APPCTRL:
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
+ case COLOR_SPACE_YCBCR709_BLACK:
/* do nothing */
break;
}
@@ -471,7 +473,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom);
}
-static void enc1_stream_encoder_set_stream_attribute_helper(
+void enc1_stream_encoder_set_stream_attribute_helper(
struct dcn10_stream_encoder *enc1,
struct dc_crtc_timing *crtc_timing)
{
@@ -726,11 +728,9 @@ void enc1_stream_encoder_update_dp_info_packets(
3, /* packetIndex */
&info_frame->hdrsmd);
- if (info_frame->dpsdp.valid)
- enc1_update_generic_info_packet(
- enc1,
- 4,/* packetIndex */
- &info_frame->dpsdp);
+ /* Packet index 4 is reserved for the immediate SDP message; use
+ * other packet indices (such as 5 or 6) for other info packets
+ */
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
@@ -738,7 +738,101 @@ void enc1_stream_encoder_update_dp_info_packets(
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
- REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, info_frame->dpsdp.valid);
+
+
+ /* This bit is the master enable bit.
+ * When enabling the secondary stream engine,
+ * this master bit must also be set.
+ * This register is shared with the audio info frame.
+ * Therefore we need to enable the master bit
+ * if at least one of the fields is not 0
+ */
+ value = REG_READ(DP_SEC_CNTL);
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+void enc1_stream_encoder_send_immediate_sdp_message(
+ struct stream_encoder *enc,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t value = 0;
+
+ /* TODOFPGA: figure out a proper number of max_retries for polling
+ * for lock; use 50 for now.
+ */
+ uint32_t max_retries = 50;
+
+ /* check if GSP4 is transmitted */
+ REG_WAIT(DP_SEC_CNTL2, DP_SEC_GSP4_SEND_PENDING,
+ 0, 10, max_retries);
+
+ /* disable GSP4 transmitting */
+ REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP4_SEND, 0);
+
+ /* transmit GSP4 at the earliest time in a frame */
+ REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP4_SEND_ANY_LINE, 1);
+
+ /* we need to turn on the clock before programming the AFMT block */
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ /* wait until HW is no longer reading GSP memory */
+ REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
+ 0, 10, max_retries);
+
+ /* If HW is still reading GSP memory after the wait above, something
+ * is wrong; clear the GSP memory access conflict before SW writes to
+ * GSP memory.
+ */
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
+
+ /* use generic packet 4 for immediate sdp message */
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
+ AFMT_GENERIC_INDEX, 4);
+
+ /* write generic packet header
+ * (4th byte is for GENERIC0 only)
+ */
+ REG_SET_4(AFMT_GENERIC_HDR, 0,
+ AFMT_GENERIC_HB0, custom_sdp_message[0],
+ AFMT_GENERIC_HB1, custom_sdp_message[1],
+ AFMT_GENERIC_HB2, custom_sdp_message[2],
+ AFMT_GENERIC_HB3, custom_sdp_message[3]);
+
+ /* write generic packet contents
+ * (we never use last 4 bytes)
+ * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers
+ */
+ {
+ const uint32_t *content =
+ (const uint32_t *) &custom_sdp_message[4];
+
+ REG_WRITE(AFMT_GENERIC_0, *content++);
+ REG_WRITE(AFMT_GENERIC_1, *content++);
+ REG_WRITE(AFMT_GENERIC_2, *content++);
+ REG_WRITE(AFMT_GENERIC_3, *content++);
+ REG_WRITE(AFMT_GENERIC_4, *content++);
+ REG_WRITE(AFMT_GENERIC_5, *content++);
+ REG_WRITE(AFMT_GENERIC_6, *content++);
+ REG_WRITE(AFMT_GENERIC_7, *content);
+ }
+
+ /* check whether a double-buffered update of the GENERIC4 registers
+ * in immediate mode is still pending
+ */
+ REG_WAIT(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING,
+ 0, 10, max_retries);
+
+ /* atomically update double-buffered GENERIC4 registers in immediate mode
+ * (update immediately)
+ */
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
+
+ /* enable GSP4 transmitting */
+ REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP4_SEND, 1);
/* This bit is the master enable bit.
* When enabling secondary stream engine,
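For context, a hedged example of what a caller might hand the new hook: a 36-byte buffer (4 header bytes plus the 8 x 4 payload bytes read above, of which the last 4 go unused). The header values here are made up:

	/* Hypothetical SDP message: HB0..HB3 followed by 32 payload bytes. */
	uint8_t sdp_msg[36] = { 0x00, 0x07, 0x00, 0x1b };

	enc->funcs->send_immediate_sdp_message(enc, sdp_msg, sizeof(sdp_msg));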
@@ -998,19 +1092,6 @@ union audio_cea_channels {
} channels;
};
-struct audio_clock_info {
- /* pixel clock frequency*/
- uint32_t pixel_clock_in_10khz;
- /* N - 32KHz audio */
- uint32_t n_32khz;
- /* CTS - 32KHz audio*/
- uint32_t cts_32khz;
- uint32_t n_44khz;
- uint32_t cts_44khz;
- uint32_t n_48khz;
- uint32_t cts_48khz;
-};
-
/* 25.2MHz/1.001*/
/* 25.2MHz/1.001*/
/* 25.2MHz*/
@@ -1113,7 +1194,7 @@ static union audio_cea_channels speakers_to_channels(
return cea_channels;
}
-static void get_audio_clock_info(
+void get_audio_clock_info(
enum dc_color_depth color_depth,
uint32_t crtc_pixel_clock_in_khz,
uint32_t actual_pixel_clock_in_khz,
@@ -1317,7 +1398,7 @@ static void enc1_se_setup_dp_audio(
REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
}
-static void enc1_se_enable_audio_clock(
+void enc1_se_enable_audio_clock(
struct stream_encoder *enc,
bool enable)
{
@@ -1339,7 +1420,7 @@ static void enc1_se_enable_audio_clock(
*/
}
-static void enc1_se_enable_dp_audio(
+void enc1_se_enable_dp_audio(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -1462,6 +1543,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
enc1_stream_encoder_stop_hdmi_info_packets,
.update_dp_info_packets =
enc1_stream_encoder_update_dp_info_packets,
+ .send_immediate_sdp_message =
+ enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index e654c2f55971..46c93ffc28d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -81,6 +81,7 @@
SRI(DP_MSE_RATE_UPDATE, DP, id), \
SRI(DP_PIXEL_FORMAT, DP, id), \
SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_SEC_CNTL2, DP, id), \
SRI(DP_STEER_FIFO, DP, id), \
SRI(DP_VID_M, DP, id), \
SRI(DP_VID_N, DP, id), \
@@ -118,10 +119,12 @@ struct dcn10_stream_enc_registers {
uint32_t AFMT_60958_1;
uint32_t AFMT_60958_2;
uint32_t DIG_FE_CNTL;
+ uint32_t DIG_FE_CNTL2;
uint32_t DP_MSE_RATE_CNTL;
uint32_t DP_MSE_RATE_UPDATE;
uint32_t DP_PIXEL_FORMAT;
uint32_t DP_SEC_CNTL;
+ uint32_t DP_SEC_CNTL2;
uint32_t DP_STEER_FIFO;
uint32_t DP_VID_M;
uint32_t DP_VID_N;
@@ -191,6 +194,10 @@ struct dcn10_stream_enc_registers {
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_PENDING, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL4, DP_SEC_GSP4_LINE_NUM, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_ANY_LINE, mask_sh),\
SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
@@ -245,6 +252,7 @@ struct dcn10_stream_enc_registers {
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE_PENDING, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE_PENDING, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE_PENDING, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE_PENDING, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE_PENDING, mask_sh),\
@@ -253,6 +261,7 @@ struct dcn10_stream_enc_registers {
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
@@ -260,6 +269,7 @@ struct dcn10_stream_enc_registers {
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP7_PPS, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP7_SEND, mask_sh),\
SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
@@ -304,6 +314,7 @@ struct dcn10_stream_enc_registers {
type AFMT_GENERIC2_FRAME_UPDATE_PENDING;\
type AFMT_GENERIC3_FRAME_UPDATE_PENDING;\
type AFMT_GENERIC4_FRAME_UPDATE_PENDING;\
+ type AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING;\
type AFMT_GENERIC5_FRAME_UPDATE_PENDING;\
type AFMT_GENERIC6_FRAME_UPDATE_PENDING;\
type AFMT_GENERIC7_FRAME_UPDATE_PENDING;\
@@ -312,6 +323,7 @@ struct dcn10_stream_enc_registers {
type AFMT_GENERIC2_FRAME_UPDATE;\
type AFMT_GENERIC3_FRAME_UPDATE;\
type AFMT_GENERIC4_FRAME_UPDATE;\
+ type AFMT_GENERIC4_IMMEDIATE_UPDATE;\
type AFMT_GENERIC5_FRAME_UPDATE;\
type AFMT_GENERIC6_FRAME_UPDATE;\
type AFMT_GENERIC7_FRAME_UPDATE;\
@@ -366,7 +378,12 @@ struct dcn10_stream_enc_registers {
type DP_SEC_GSP5_ENABLE;\
type DP_SEC_GSP6_ENABLE;\
type DP_SEC_GSP7_ENABLE;\
+ type DP_SEC_GSP7_PPS;\
type DP_SEC_GSP7_SEND;\
+ type DP_SEC_GSP4_SEND;\
+ type DP_SEC_GSP4_SEND_PENDING;\
+ type DP_SEC_GSP4_LINE_NUM;\
+ type DP_SEC_GSP4_SEND_ANY_LINE;\
type DP_SEC_MPG_ENABLE;\
type DP_VID_STREAM_DIS_DEFER;\
type DP_VID_STREAM_ENABLE;\
@@ -484,6 +501,11 @@ void enc1_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame);
+void enc1_stream_encoder_send_immediate_sdp_message(
+ struct stream_encoder *enc,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
@@ -530,4 +552,21 @@ void enc1_dig_connect_to_otg(
struct stream_encoder *enc,
int tg_inst);
+void enc1_stream_encoder_set_stream_attribute_helper(
+ struct dcn10_stream_encoder *enc1,
+ struct dc_crtc_timing *crtc_timing);
+
+void enc1_se_enable_audio_clock(
+ struct stream_encoder *enc,
+ bool enable);
+
+void enc1_se_enable_dp_audio(
+ struct stream_encoder *enc);
+
+void get_audio_clock_info(
+ enum dc_color_depth color_depth,
+ uint32_t crtc_pixel_clock_in_khz,
+ uint32_t actual_pixel_clock_in_khz,
+ struct audio_clock_info *audio_clock_info);
+
#endif /* __DC_STREAM_ENCODER_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index 4fc4208d1472..471f3df88c92 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -41,6 +41,7 @@ enum pp_smu_ver {
*/
PP_SMU_UNSUPPORTED,
PP_SMU_VER_RV,
+
PP_SMU_VER_MAX
};
@@ -56,12 +57,31 @@ struct pp_smu {
const void *dm;
};
+enum pp_smu_status {
+ PP_SMU_RESULT_UNDEFINED = 0,
+ PP_SMU_RESULT_OK = 1,
+ PP_SMU_RESULT_FAIL,
+ PP_SMU_RESULT_UNSUPPORTED
+};
+
+
+#define PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN 0x0
+#define PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX 0xFFFF
+
+enum wm_type {
+ WM_TYPE_PSTATE_CHG = 0,
+ WM_TYPE_RETRAINING = 1,
+};
+
+/* This structure is a copy of WatermarkRowGeneric_t defined by smuxx_driver_if.h */
struct pp_smu_wm_set_range {
- unsigned int wm_inst;
- uint32_t min_fill_clk_mhz;
- uint32_t max_fill_clk_mhz;
- uint32_t min_drain_clk_mhz;
- uint32_t max_drain_clk_mhz;
+ uint16_t min_fill_clk_mhz;
+ uint16_t max_fill_clk_mhz;
+ uint16_t min_drain_clk_mhz;
+ uint16_t max_drain_clk_mhz;
+
+ uint8_t wm_inst;
+ uint8_t wm_type;
};
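A sketch of how a caller might populate one entry, using the new unconstrained defines for the fill clock and assumed drain-clock bounds:

	struct pp_smu_wm_set_range range = {
		.wm_inst = 0,
		.wm_type = WM_TYPE_PSTATE_CHG,
		.min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN,
		.max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX,
		.min_drain_clk_mhz = 300,  /* assumed */
		.max_drain_clk_mhz = 1200, /* assumed */
	};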
#define MAX_WATERMARK_SETS 4
@@ -80,6 +100,7 @@ struct pp_smu_funcs_rv {
/* PPSMC_MSG_SetDisplayCount
* 0 triggers S0i2 optimization
*/
+
void (*set_display_count)(struct pp_smu *pp, int count);
/* reader and writer WM's are sent together as part of one table*/
@@ -115,13 +136,13 @@ struct pp_smu_funcs_rv {
/* PME w/a */
void (*set_pme_wa_enable)(struct pp_smu *pp);
-
};
struct pp_smu_funcs {
struct pp_smu ctx;
union {
struct pp_smu_funcs_rv rv_funcs;
+
};
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index c59e582c1f40..174c414e0982 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -147,4 +147,10 @@ enum dm_validation_status {
DML_FAIL_V_RATIO_PREFETCH,
};
+enum writeback_config {
+ dm_normal,
+ dm_whole_buffer_for_single_stream_no_interleave,
+ dm_whole_buffer_for_single_stream_interleave,
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index c5b791d158a7..6cc59f138095 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -219,6 +219,9 @@ struct _vcs_dpi_display_pipe_source_params_st {
unsigned char xfc_enable;
unsigned char xfc_slave;
struct _vcs_dpi_display_xfc_params_st xfc_params;
+ // for vstartup lines calculation (FreeSync)
+ unsigned char v_total_min;
+ unsigned char v_total_max;
};
struct writeback_st {
int wb_src_height;
@@ -289,6 +292,8 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned char otg_inst;
unsigned char odm_combine;
unsigned char use_maximum_vstartup;
+ unsigned int vtotal_max;
+ unsigned int vtotal_min;
};
struct _vcs_dpi_display_pipe_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
index cf76ea2d9f5a..d03b38e80d9b 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -27,6 +27,8 @@
* Pre-requisites: headers required by header of this unit
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "include/gpio_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index 3c63a3c04dbb..a7fab44f66b6 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -27,6 +27,8 @@
* Pre-requisites: headers required by header of this unit
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "include/gpio_interface.h"
#include "include/gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
index 310f48965b27..240cdd8d9689 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/delay.h>
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "include/gpio_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index c2028c4744a6..a15aca47342c 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
/*
@@ -84,10 +86,6 @@ bool dal_hw_factory_init(
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
- dal_hw_factory_dcn10_init(factory);
- return true;
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
case DCN_VERSION_1_01:
dal_hw_factory_dcn10_init(factory);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
index 784feccc5853..5e11d748e6f3 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "include/gpio_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 236ca28784a9..77615146b96e 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -84,11 +84,6 @@ bool dal_hw_translate_init(
dal_hw_translate_dcn10_init(translate);
return true;
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
- case DCN_VERSION_1_01:
- dal_hw_translate_dcn10_init(translate);
- return true;
-#endif
default:
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 6f5ab05d6467..80709c9343c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -82,7 +82,6 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
/********** DAL Core*********************/
-#include "hw/clk_mgr.h"
#include "transform.h"
#include "dpp.h"
@@ -123,6 +122,11 @@ struct resource_funcs {
enum dc_status (*get_default_swizzle_mode)(
struct dc_plane_state *plane_state);
+ struct stream_encoder *(*find_first_free_match_stream_enc_for_link)(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream);
+
};
struct audio_support{
@@ -173,7 +177,6 @@ struct resource_pool {
unsigned int audio_count;
struct audio_support audio_support;
- struct clk_mgr *clk_mgr;
struct dccg *dccg;
struct irq_service *irqs;
@@ -212,6 +215,25 @@ struct plane_resource {
struct dcn_fe_bandwidth bw;
};
+union pipe_update_flags {
+ struct {
+ uint32_t enable : 1;
+ uint32_t disable : 1;
+ uint32_t odm : 1;
+ uint32_t global_sync : 1;
+ uint32_t opp_changed : 1;
+ uint32_t tg_changed : 1;
+ uint32_t mpcc : 1;
+ uint32_t dppclk : 1;
+ uint32_t hubp_interdependent : 1;
+ uint32_t hubp_rq_dlg_ttu : 1;
+ uint32_t gamut_remap : 1;
+ uint32_t scaler : 1;
+ uint32_t viewport : 1;
+ } bits;
+ uint32_t raw;
+};
+
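A small sketch of how the new flags might be consumed: set individual bits while diffing against the previous state, then test .raw to short-circuit when nothing changed (old_pipe_ctx and the diffing criteria are assumed):

	pipe_ctx->update_flags.raw = 0;
	if (pipe_ctx->stream != old_pipe_ctx->stream)
		pipe_ctx->update_flags.bits.enable = 1;
	if (memcmp(&pipe_ctx->pipe_dlg_param, &old_pipe_ctx->pipe_dlg_param,
			sizeof(pipe_ctx->pipe_dlg_param)))
		pipe_ctx->update_flags.bits.global_sync = 1;

	if (!pipe_ctx->update_flags.raw)
		return; /* nothing to reprogram */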
struct pipe_ctx {
struct dc_plane_state *plane_state;
struct dc_stream_state *stream;
@@ -234,6 +256,7 @@ struct pipe_ctx {
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
#endif
+ union pipe_update_flags update_flags;
};
struct resource_context {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 263c09630c06..806f3041db14 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -32,7 +32,7 @@
#include "bw_fixed.h"
#include "../dml/display_mode_lib.h"
-#include "hw/clk_mgr.h"
+
struct dc;
struct dc_state;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index 86dc39a02408..d607b3191954 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -37,7 +37,7 @@ struct abm_backlight_registers {
struct abm {
struct dc_context *ctx;
const struct abm_funcs *funcs;
-
+ bool dmcu_is_running;
/* registers setting needs to be saved and restored at InitBacklight */
struct abm_backlight_registers stored_backlight_registers;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
index 925204f49717..6ed1fb8c9300 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
@@ -57,6 +57,7 @@ struct audio {
const struct audio_funcs *funcs;
struct dc_context *ctx;
unsigned int inst;
+ bool enabled;
};
#endif /* __DAL_AUDIO__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 31bd6d5183ab..721e13135e76 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -26,17 +26,22 @@
#ifndef __DAL_CLK_MGR_H__
#define __DAL_CLK_MGR_H__
-#include "dm_services_types.h"
#include "dc.h"
-struct clk_mgr {
- struct dc_context *ctx;
- const struct clk_mgr_funcs *funcs;
+/* Public interfaces */
- struct dc_clocks clks;
+struct clk_states {
+ uint32_t dprefclk_khz;
};
struct clk_mgr_funcs {
+ /*
+ * This function should set new clocks based on the input "safe_to_lower".
+ * If safe_to_lower == false, only clocks which are to be increased should
+ * be changed.
+ * If safe_to_lower == true, clocks may also be decreased.
+ */
void (*update_clocks)(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower);
@@ -44,6 +49,24 @@ struct clk_mgr_funcs {
int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
void (*init_clocks)(struct clk_mgr *clk_mgr);
+
+ void (*enable_pme_wa) (struct clk_mgr *clk_mgr);
};
+void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr_base);
+
+struct clk_mgr {
+ struct dc_context *ctx;
+ struct clk_mgr_funcs *funcs;
+ struct dc_clocks clks;
+ int dprefclk_khz; // Used when programming the pixel clock in clock source funcs; need to figure out where this goes
+};
+
+/* forward declarations */
+struct dccg;
+
+struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg);
+
+void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr);
+
#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index c8f8c442142a..6e189b1283aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
+ * Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,14 +23,40 @@
*
*/
-
-#ifndef _DCE_CLK_MGR_H_
-#define _DCE_CLK_MGR_H_
+#ifndef __DAL_CLK_MGR_INTERNAL_H__
+#define __DAL_CLK_MGR_INTERNAL_H__
#include "clk_mgr.h"
-#include "dccg.h"
-#define MEMORY_TYPE_MULTIPLIER_CZ 4
+/*
+ * The only thing needed from here is MEMORY_TYPE_MULTIPLIER_CZ, which is
+ * also used in resource; perhaps it should be defined somewhere more common.
+ */
+#include "resource.h"
+
+/*
+ ***************************************************************************************
+ ****************** Clock Manager Private Macros and Defines ***************************
+ ***************************************************************************************
+ */
+
+#define TO_CLK_MGR_INTERNAL(clk_mgr)\
+ container_of(clk_mgr, struct clk_mgr_internal, base)
+
+#define CTX \
+ clk_mgr->base.ctx
+#define DC_LOGGER \
+ clk_mgr->ctx->logger
+
+#define CLK_BASE(inst) \
+ CLK_BASE_INNER(inst)
+
+#define CLK_SRI(reg_name, block, inst)\
+ .reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## _ ## inst ## _ ## reg_name
#define CLK_COMMON_REG_LIST_DCE_BASE() \
.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
@@ -50,12 +76,31 @@
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+#define CLK_MASK_SH_LIST_RV1(mask_sh) \
+ CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\
+ CLK_SF(MP1_SMN_C2PMSG_67, CONTENT, mask_sh),\
+ CLK_SF(MP1_SMN_C2PMSG_83, CONTENT, mask_sh),\
+ CLK_SF(MP1_SMN_C2PMSG_91, CONTENT, mask_sh),
+
+
#define CLK_REG_FIELD_LIST(type) \
type DPREFCLK_SRC_SEL; \
type DENTIST_DPREFCLK_WDIVIDER; \
type DENTIST_DISPCLK_WDIVIDER; \
type DENTIST_DISPCLK_CHG_DONE;
+/*
+ ***************************************************************************************
+ ****************** Clock Manager Private Structures ***********************************
+ ***************************************************************************************
+ */
+
+struct clk_mgr_registers {
+ uint32_t DPREFCLK_CNTL;
+ uint32_t DENTIST_DISPCLK_CNTL;
+
+};
+
struct clk_mgr_shift {
CLK_REG_FIELD_LIST(uint8_t)
};
@@ -64,34 +109,42 @@ struct clk_mgr_mask {
CLK_REG_FIELD_LIST(uint32_t)
};
-struct clk_mgr_registers {
- uint32_t DPREFCLK_CNTL;
- uint32_t DENTIST_DISPCLK_CNTL;
-};
struct state_dependent_clocks {
int display_clk_khz;
int pixel_clk_khz;
};
-struct dce_clk_mgr {
+struct clk_mgr_internal {
struct clk_mgr base;
+ struct pp_smu_funcs *pp_smu;
+ struct clk_mgr_internal_funcs *funcs;
+
+ struct dccg *dccg;
+
+ /*
+ * For backwards compatibility with the previous implementation
+ * TODO: remove these after everything transitions to new pattern
+ * Rationale is that clk registers change a lot across DCE versions
+ * and a shared data structure doesn't really make sense.
+ */
const struct clk_mgr_registers *regs;
const struct clk_mgr_shift *clk_mgr_shift;
const struct clk_mgr_mask *clk_mgr_mask;
- struct dccg *dccg;
-
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+ /* TODO: figure out which of the fields below should be here vs in the ASIC-specific portion */
int dentist_vco_freq_khz;
/* Cache the status of DFS-bypass feature*/
bool dfs_bypass_enabled;
/* True if the DFS-bypass feature is enabled and active. */
bool dfs_bypass_active;
- /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
- * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
+ /*
+ * Cache the display clock returned by VBIOS if DFS-bypass is enabled.
+ * This is basically "Crystal Frequency In KHz" (XTALIN) frequency
+ */
int dfs_bypass_disp_clk;
/**
@@ -126,74 +179,33 @@ struct dce_clk_mgr {
* DPREFCLK SS percentage Divider (100 or 1000).
*/
int dprefclk_ss_divider;
- int dprefclk_khz;
enum dm_pp_clocks_state max_clks_state;
enum dm_pp_clocks_state cur_min_clks_state;
};
-/* Starting DID for each range */
-enum dentist_base_divider_id {
- DENTIST_BASE_DID_1 = 0x08,
- DENTIST_BASE_DID_2 = 0x40,
- DENTIST_BASE_DID_3 = 0x60,
- DENTIST_BASE_DID_4 = 0x7e,
- DENTIST_MAX_DID = 0x7f
+struct clk_mgr_internal_funcs {
+ int (*set_dispclk)(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
+ int (*set_dprefclk)(struct clk_mgr_internal *clk_mgr);
};
-/* Starting point and step size for each divider range.*/
-enum dentist_divider_range {
- DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
- DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
- DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
- DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
- DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
- DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
- DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
- DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
- DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
-};
+
+/*
+ ***************************************************************************************
+ ****************** Clock Manager Level Helper functions *******************************
+ ***************************************************************************************
+ */
+
static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
}
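This helper encodes the safe_to_lower contract documented on update_clocks earlier in the series: raises always apply, drops only when the caller says lowering is safe. A hedged sketch of the typical call pattern inside an update_clocks implementation (the bw_ctx field path is assumed):

	static void example_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context, bool safe_to_lower)
	{
		struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
		int dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; /* assumed */

		if (should_set_clock(safe_to_lower, dispclk_khz,
				clk_mgr_base->clks.dispclk_khz)) {
			clk_mgr_base->clks.dispclk_khz = dispclk_khz;
			clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_khz);
		}
	}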
-void dce_clock_read_ss_info(struct dce_clk_mgr *dccg_dce);
-
-int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg);
-
-void dce110_fill_display_configs(
- const struct dc_state *context,
- struct dm_pp_display_configuration *pp_display_cfg);
-
-int dce112_set_clock(struct clk_mgr *dccg, int requested_clk_khz);
-
-struct clk_mgr *dce_clk_mgr_create(
- struct dc_context *ctx,
- const struct clk_mgr_registers *regs,
- const struct clk_mgr_shift *clk_shift,
- const struct clk_mgr_mask *clk_mask);
-
-struct clk_mgr *dce110_clk_mgr_create(
- struct dc_context *ctx,
- const struct clk_mgr_registers *regs,
- const struct clk_mgr_shift *clk_shift,
- const struct clk_mgr_mask *clk_mask);
-
-struct clk_mgr *dce112_clk_mgr_create(
- struct dc_context *ctx,
- const struct clk_mgr_registers *regs,
- const struct clk_mgr_shift *clk_shift,
- const struct clk_mgr_mask *clk_mask);
-
-struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
-
-struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx);
-void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr);
+int clk_mgr_helper_get_active_display_cnt(
+ struct dc *dc,
+ struct dc_state *context);
-void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
-int dentist_get_divider_from_did(int did);
-#endif /* _DCE_CLK_MGR_H_ */
+#endif //__DAL_CLK_MGR_INTERNAL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index fb7967b39edb..b55c5a2e56e2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -64,7 +64,22 @@ struct dcn_dpp_state {
uint32_t gamut_remap_c33_c34;
};
+struct CM_bias_params {
+ uint32_t cm_bias_cr_r;
+ uint32_t cm_bias_y_g;
+ uint32_t cm_bias_cb_b;
+ uint32_t cm_bias_format;
+};
+
struct dpp_funcs {
+
+ void (*dpp_program_cm_dealpha)(struct dpp *dpp_base,
+ uint32_t enable, uint32_t additive_blending);
+
+ void (*dpp_program_cm_bias)(
+ struct dpp *dpp_base,
+ struct CM_bias_params *bias_params);
+
void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
void (*dpp_reset)(struct dpp *dpp);
@@ -155,9 +170,11 @@ struct dpp_funcs {
uint32_t width,
uint32_t height
);
+
void (*dpp_set_hdr_multiplier)(
struct dpp *dpp_base,
uint32_t multiplier);
+
void (*set_optional_cursor_attributes)(
struct dpp *dpp_base,
struct dpp_cursor_attributes *attr);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index c9d3e37e9531..ca162079a41b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -59,6 +59,7 @@ struct encoder_feature_support {
uint32_t IS_TPS3_CAPABLE:1;
uint32_t IS_TPS4_CAPABLE:1;
uint32_t HDMI_6GB_EN:1;
+ uint32_t DP_IS_USB_C:1;
} bits;
uint32_t raw;
} flags;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 49854eb73d1d..74db1d82fa35 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -52,6 +52,19 @@ enum dp_component_depth {
DP_COMPONENT_PIXEL_DEPTH_16BPC = 0x00000004
};
+struct audio_clock_info {
+ /* pixel clock frequency*/
+ uint32_t pixel_clock_in_10khz;
+ /* N - 32KHz audio */
+ uint32_t n_32khz;
+ /* CTS - 32KHz audio*/
+ uint32_t cts_32khz;
+ uint32_t n_44khz;
+ uint32_t cts_44khz;
+ uint32_t n_48khz;
+ uint32_t cts_48khz;
+};
+
struct encoder_info_frame {
/* auxiliary video information */
struct dc_info_packet avi;
@@ -63,8 +76,6 @@ struct encoder_info_frame {
struct dc_info_packet vsc;
/* HDR Static MetaData */
struct dc_info_packet hdrsmd;
- /* custom sdp message */
- struct dc_info_packet dpsdp;
};
struct encoder_unblank_param {
@@ -123,6 +134,11 @@ struct stream_encoder_funcs {
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame);
+ void (*send_immediate_sdp_message)(
+ struct stream_encoder *enc,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 067d53caf28a..a89d0cf59cca 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -70,14 +70,6 @@ enum crtc_state {
CRTC_STATE_VACTIVE
};
-struct _dlg_otg_param {
- int vstartup_start;
- int vupdate_offset;
- int vupdate_width;
- int vready_offset;
- enum signal_type signal;
-};
-
struct vupdate_keepout_params {
int start_offset;
int end_offset;
@@ -126,7 +118,6 @@ struct timing_generator {
const struct timing_generator_funcs *funcs;
struct dc_bios *bp;
struct dc_context *ctx;
- struct _dlg_otg_param dlg_otg_param;
int inst;
};
@@ -140,7 +131,13 @@ struct timing_generator_funcs {
const struct dc_crtc_timing *timing);
void (*program_timing)(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
- bool use_vbios);
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ const enum signal_type signal,
+ bool use_vbios
+ );
void (*setup_vertical_interrupt0)(
struct timing_generator *optc,
uint32_t start_line,
@@ -210,7 +207,11 @@ struct timing_generator_funcs {
bool (*arm_vert_intr)(struct timing_generator *tg, uint8_t width);
- void (*program_global_sync)(struct timing_generator *tg);
+ void (*program_global_sync)(struct timing_generator *tg,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width);
void (*enable_optc_clock)(struct timing_generator *tg, bool enable);
void (*program_stereo)(struct timing_generator *tg,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
@@ -237,6 +238,11 @@ struct timing_generator_funcs {
bool (*get_crc)(struct timing_generator *tg,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+ void (*program_manual_trigger)(struct timing_generator *optc);
+ void (*setup_manual_trigger)(struct timing_generator *optc);
+
+ void (*set_vtg_params)(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 33905468e2b9..eb1c12ed026a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -158,6 +158,11 @@ struct hw_sequencer_funcs {
void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
+ void (*send_immediate_sdp_message)(
+ struct pipe_ctx *pipe_ctx,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+
void (*enable_stream)(struct pipe_ctx *pipe_ctx);
void (*disable_stream)(struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 3ce0a4fc5822..08915b737799 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -30,6 +30,8 @@
#include "dal_asic_id.h"
#include "dm_pp_smu.h"
+#define MEMORY_TYPE_MULTIPLIER_CZ 4
+
enum dce_version resource_parse_asic_id(
struct hw_asic_id asic_id);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index 86987f5e8bd5..1a581c464345 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "include/logger_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 750ba0ab4106..15380336cb51 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "include/logger_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index de218fe84a43..281fee8ad1e5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "include/logger_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index 10ac6deff5ff..ebf483e3f098 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "include/logger_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 604bea01fc13..0878550a8178 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "include/irq_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index c0d9f332baed..30ec80ac6fc8 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -26,11 +26,13 @@
#ifndef _OS_TYPES_H_
#define _OS_TYPES_H_
-#include <asm/byteorder.h>
+#include <linux/kgdb.h>
+#include <linux/kref.h>
#include <linux/types.h>
-#include <drm/drmP.h>
-#include <linux/kref.h>
+#include <asm/byteorder.h>
+
+#include <drm/drm_print.h>
#include "cgs_common.h"
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
index 1c079ba37c30..3464b2d5b89a 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dm_services_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index fdcf9e66d852..484047155aae 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "virtual_stream_encoder.h"
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 01bf01a34a08..c30437ae8395 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -307,7 +307,8 @@ struct bp_encoder_cap_info {
uint32_t DP_HBR2_EN:1;
uint32_t DP_HBR3_EN:1;
uint32_t HDMI_6GB_EN:1;
- uint32_t RESERVED:30;
+ uint32_t DP_IS_USB_C:1;
+ uint32_t RESERVED:27;
};
#endif /*__DAL_BIOS_PARSER_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 4c8ce7938f01..b302ff3180a4 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -132,20 +132,18 @@
#define RAVEN_A0 0x01
#define RAVEN_B0 0x21
#define PICASSO_A0 0x41
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
/* DCN1_01 */
#define RAVEN2_A0 0x81
-#endif
-#define RAVEN_UNKNOWN 0xFF
-
-#define ASIC_REV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#define RAVEN1_F0 0xF0
-#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
+#define RAVEN_UNKNOWN 0xFF
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
+#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 0xF0))
-#endif /* DCN1_01 */
+
+
+#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
+
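With the config guards gone these revision checks are always available; a trivial usage sketch against the same hw_internal_rev field DC already passes to these macros:

	uint32_t rev = dc->ctx->asic_id.hw_internal_rev;
	bool is_raven2  = ASICREV_IS_RAVEN2(rev);  /* 0x81..0xEF -> DCN 1.01 */
	bool is_picasso = ASICREV_IS_PICASSO(rev); /* 0x41..0x80 */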
#define FAMILY_RV 142 /* DCN 1*/
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index f5bd869d4320..dabdbc0999d4 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -45,9 +45,7 @@ enum dce_version {
DCE_VERSION_12_1,
DCE_VERSION_MAX,
DCN_VERSION_1_0,
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
DCN_VERSION_1_01,
-#endif /* DCN1_01 */
DCN_VERSION_MAX
};
diff --git a/drivers/gpu/drm/amd/display/include/set_mode_types.h b/drivers/gpu/drm/amd/display/include/set_mode_types.h
index 2b836e582c08..845fea8a387f 100644
--- a/drivers/gpu/drm/amd/display/include/set_mode_types.h
+++ b/drivers/gpu/drm/amd/display/include/set_mode_types.h
@@ -84,7 +84,10 @@ union hdmi_info_packet {
uint16_t bar_left;
uint16_t bar_right;
- uint8_t reserved[14];
+ uint8_t F140_F143:4;
+ uint8_t ACE0_ACE3:4;
+
+ uint8_t reserved[13];
} bits;
struct info_packet_raw_data packet_raw_data;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index a1055413bade..b31af9be41eb 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/mm.h>
+#include <linux/slab.h>
+
#include "dc.h"
#include "opp.h"
#include "color_gamma.h"
@@ -240,16 +243,27 @@ struct dividers {
struct fixed31_32 divider3;
};
-static void build_coefficients(struct gamma_coefficients *coefficients, bool is_2_4)
+enum gamma_type_index {
+ gamma_type_index_2_4,
+ gamma_type_index_2_2,
+ gamma_type_index_2_2_flat
+};
+
+static void build_coefficients(struct gamma_coefficients *coefficients, enum gamma_type_index type)
{
- static const int32_t numerator01[] = { 31308, 180000};
- static const int32_t numerator02[] = { 12920, 4500};
- static const int32_t numerator03[] = { 55, 99};
- static const int32_t numerator04[] = { 55, 99};
- static const int32_t numerator05[] = { 2400, 2200};
+ static const int32_t numerator01[] = { 31308, 180000, 0};
+ static const int32_t numerator02[] = { 12920, 4500, 0};
+ static const int32_t numerator03[] = { 55, 99, 0};
+ static const int32_t numerator04[] = { 55, 99, 0};
+ static const int32_t numerator05[] = { 2400, 2200, 2200};
uint32_t i = 0;
- uint32_t index = is_2_4 == true ? 0:1;
+ uint32_t index = 0;
+
+ if (type == gamma_type_index_2_2)
+ index = 1;
+ else if (type == gamma_type_index_2_2_flat)
+ index = 2;
do {
coefficients->a0[i] = dc_fixpt_from_fraction(
@@ -697,7 +711,7 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
- const struct hw_x_point *coordinate_x, bool is_2_4)
+ const struct hw_x_point *coordinate_x, enum gamma_type_index type)
{
uint32_t i;
@@ -705,7 +719,7 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinate_x;
- build_coefficients(&coeff, is_2_4);
+ build_coefficients(&coeff, type);
i = 0;
@@ -892,13 +906,13 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
static void build_degamma(struct pwl_float_data_ex *curve,
uint32_t hw_points_num,
- const struct hw_x_point *coordinate_x, bool is_2_4)
+ const struct hw_x_point *coordinate_x, enum gamma_type_index type)
{
uint32_t i;
struct gamma_coefficients coeff;
uint32_t begin_index, end_index;
- build_coefficients(&coeff, is_2_4);
+ build_coefficients(&coeff, type);
i = 0;
/* X points is 2^-25 to 2^7
@@ -1558,13 +1572,15 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
output_tf->tf == TRANSFER_FUNCTION_SRGB) {
if (ramp == NULL)
return true;
- if (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+ if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
+ (!mapUserRamp && ramp->type == GAMMA_RGB_256))
return true;
}
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- if (ramp && (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
+ if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
+ (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
sizeof(*rgb_user),
GFP_KERNEL);
@@ -1614,7 +1630,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
coordinates_x,
output_tf->sdr_ref_white_level);
} else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
- fs_params != NULL) {
+ fs_params != NULL && fs_params->skip_tm == 0) {
build_freesync_hdr(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
@@ -1627,7 +1643,9 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
build_regamma(rgb_regamma,
MAX_HW_POINTS,
- coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? true:false);
+ coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? gamma_type_index_2_4 :
+ tf == TRANSFER_FUNCTION_GAMMA22 ?
+ gamma_type_index_2_2_flat : gamma_type_index_2_2);
}
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
coordinates_x, axis_x, rgb_regamma,
@@ -1832,7 +1850,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
build_degamma(curve,
MAX_HW_POINTS,
coordinates_x,
- tf == TRANSFER_FUNCTION_SRGB ? true : false);
+ tf == TRANSFER_FUNCTION_SRGB ?
+ gamma_type_index_2_4 : tf == TRANSFER_FUNCTION_GAMMA22 ?
+ gamma_type_index_2_2_flat : gamma_type_index_2_2);
else if (tf == TRANSFER_FUNCTION_LINEAR) {
// just copy coordinates_x into curve
i = 0;
@@ -1932,7 +1952,10 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
build_regamma(rgb_regamma,
MAX_HW_POINTS,
- coordinates_x, trans == TRANSFER_FUNCTION_SRGB ? true:false);
+ coordinates_x,
+ trans == TRANSFER_FUNCTION_SRGB ?
+ gamma_type_index_2_4 : trans == TRANSFER_FUNCTION_GAMMA22 ?
+ gamma_type_index_2_2_flat : gamma_type_index_2_2);
for (i = 0; i <= MAX_HW_POINTS ; i++) {
points->red[i] = rgb_regamma[i].r;
points->green[i] = rgb_regamma[i].g;
@@ -2002,7 +2025,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
kvfree(rgb_degamma);
} else if (trans == TRANSFER_FUNCTION_SRGB ||
- trans == TRANSFER_FUNCTION_BT709) {
+ trans == TRANSFER_FUNCTION_BT709 ||
+ trans == TRANSFER_FUNCTION_GAMMA22) {
rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
sizeof(*rgb_degamma),
GFP_KERNEL);
@@ -2011,7 +2035,10 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
build_degamma(rgb_degamma,
MAX_HW_POINTS,
- coordinates_x, trans == TRANSFER_FUNCTION_SRGB ? true:false);
+ coordinates_x,
+ trans == TRANSFER_FUNCTION_SRGB ?
+ gamma_type_index_2_4 : trans == TRANSFER_FUNCTION_GAMMA22 ?
+ gamma_type_index_2_2_flat : gamma_type_index_2_2);
for (i = 0; i <= MAX_HW_POINTS ; i++) {
points->red[i] = rgb_degamma[i].r;
points->green[i] = rgb_degamma[i].g;
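Across color_gamma.c the old bool is_2_4 becomes a three-way enum: index 0 keeps the sRGB 2.4 coefficients, index 1 the BT.709-style 2.2 set, and the new index 2 a flat 2.2 curve whose zeroed numerator01..04 entries drop the linear toe segment. The repeated nested ternaries all implement the same mapping; a standalone restatement (the reduced transfer-function names are stand-ins, not the driver's):

enum gamma_type_index {
	gamma_type_index_2_4,
	gamma_type_index_2_2,
	gamma_type_index_2_2_flat
};

enum tf_sketch { TF_SRGB, TF_BT709, TF_GAMMA22 };	/* reduced stand-ins */

static enum gamma_type_index tf_to_gamma_type(enum tf_sketch tf)
{
	if (tf == TF_SRGB)
		return gamma_type_index_2_4;		/* 2.4 power + linear toe */
	if (tf == TF_GAMMA22)
		return gamma_type_index_2_2_flat;	/* pure 2.2 power curve */
	return gamma_type_index_2_2;			/* BT.709-style 2.2 + toe */
}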
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index a6e164df090a..369953fafadf 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -79,6 +79,7 @@ struct freesync_hdr_tf_params {
unsigned int max_content; // luminance in nits
unsigned int min_display; // luminance in 1/10000 nits
unsigned int max_display; // luminance in nits
+ unsigned int skip_tm; // skip tm
};
void setup_x_points_distribution(void);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 19b1eaebe484..7c20171a3b6d 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dc.h"
#include "mod_freesync.h"
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index db06fab2ad5c..bc13c552797f 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -63,7 +63,9 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
if (stream->psr_version != 0)
vscPacketRevision = 2;
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ /* Update to revision 5 for extended colorimetry support on DPCD 1.4+ */
+ if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
vscPacketRevision = 5;
/* VSC packet not needed based on the features
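The VSC SDP revision is now chosen from sink capability rather than pixel encoding: revision 5 requires DPCD 1.4+ and the DPRX VSC_SDP_COLORIMETRY_SUPPORTED feature bit. A reduced sketch of the selection (stand-in function; kernel types replaced to keep it self-contained):

#include <stdbool.h>
#include <stdint.h>

static uint8_t pick_vsc_revision(bool psr_enabled, uint8_t dpcd_rev,
				 bool vsc_colorimetry_supported)
{
	uint8_t rev = 0;

	if (psr_enabled)
		rev = 2;	/* PSR state reporting */
	if (dpcd_rev >= 0x14 && vsc_colorimetry_supported)
		rev = 5;	/* pixel encoding/colorimetry via VSC SDP */
	return rev;
}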
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
index a9575db8d7aa..6efcaa93e17b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -30,4 +30,22 @@
#define mmDF_CS_UMC_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_UMC_AON0_DramBaseAddress0_BASE_IDX 0
+#define smnPerfMonCtlLo0 0x01d440UL
+#define smnPerfMonCtlHi0 0x01d444UL
+#define smnPerfMonCtlLo1 0x01d450UL
+#define smnPerfMonCtlHi1 0x01d454UL
+#define smnPerfMonCtlLo2 0x01d460UL
+#define smnPerfMonCtlHi2 0x01d464UL
+#define smnPerfMonCtlLo3 0x01d470UL
+#define smnPerfMonCtlHi3 0x01d474UL
+
+#define smnPerfMonCtrLo0 0x01d448UL
+#define smnPerfMonCtrHi0 0x01d44cUL
+#define smnPerfMonCtrLo1 0x01d458UL
+#define smnPerfMonCtrHi1 0x01d45cUL
+#define smnPerfMonCtrLo2 0x01d468UL
+#define smnPerfMonCtrHi2 0x01d46cUL
+#define smnPerfMonCtrLo3 0x01d478UL
+#define smnPerfMonCtrHi3 0x01d47cUL
+
#endif
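The new DF perfmon registers come as control/counter pairs, one per slot with a 0x10 stride, and each counter is split across Lo/Hi dwords. A hedged sketch of assembling one 64-bit event count (reg_read() is a hypothetical stand-in for the driver's SMN read helper; a production reader would also re-read Hi to guard against rollover between the two reads):

#include <stdint.h>

#define smnPerfMonCtrLo0 0x01d448UL
#define smnPerfMonCtrHi0 0x01d44cUL

extern uint32_t reg_read(unsigned long smn_addr);	/* hypothetical */

static uint64_t df_read_counter0(void)
{
	uint32_t lo = reg_read(smnPerfMonCtrLo0);
	uint32_t hi = reg_read(smnPerfMonCtrHi0);

	return ((uint64_t)hi << 32) | lo;
}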
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index 529b37db274c..f1d048e0ed2c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -829,6 +829,8 @@
#define mmTD_CNTL_BASE_IDX 0
#define mmTD_STATUS 0x0526
#define mmTD_STATUS_BASE_IDX 0
+#define mmTD_EDC_CNT 0x052e
+#define mmTD_EDC_CNT_BASE_IDX 0
#define mmTD_DSM_CNTL 0x052f
#define mmTD_DSM_CNTL_BASE_IDX 0
#define mmTD_DSM_CNTL2 0x0530
@@ -845,6 +847,8 @@
#define mmTA_STATUS_BASE_IDX 0
#define mmTA_SCRATCH 0x0564
#define mmTA_SCRATCH_BASE_IDX 0
+#define mmTA_EDC_CNT 0x0586
+#define mmTA_EDC_CNT_BASE_IDX 0
// addressBlock: gc_gdsdec
@@ -1051,6 +1055,13 @@
#define mmGC_USER_RB_BACKEND_DISABLE_BASE_IDX 0
+// addressBlock: gc_ea_gceadec2
+// base address: 0x9c00
+#define mmGCEA_EDC_CNT 0x0706
+#define mmGCEA_EDC_CNT_BASE_IDX 0
+#define mmGCEA_EDC_CNT2 0x0707
+#define mmGCEA_EDC_CNT2_BASE_IDX 0
+
// addressBlock: gc_rmi_rmidec
// base address: 0x9e00
#define mmRMI_GENERAL_CNTL 0x0780
@@ -1709,6 +1720,8 @@
#define mmTC_CFG_L1_VOLATILE_BASE_IDX 0
#define mmTC_CFG_L2_VOLATILE 0x0b23
#define mmTC_CFG_L2_VOLATILE_BASE_IDX 0
+#define mmTCI_EDC_CNT 0x0b60
+#define mmTCI_EDC_CNT_BASE_IDX 0
#define mmTCI_STATUS 0x0b61
#define mmTCI_STATUS_BASE_IDX 0
#define mmTCI_CNTL_1 0x0b62
@@ -2594,6 +2607,24 @@
#define mmCP_RB_DOORBELL_CONTROL_SCH_7_BASE_IDX 0
#define mmCP_RB_DOORBELL_CLEAR 0x1188
#define mmCP_RB_DOORBELL_CLEAR_BASE_IDX 0
+#define mmCPF_EDC_TAG_CNT 0x1189
+#define mmCPF_EDC_TAG_CNT_BASE_IDX 0
+#define mmCPF_EDC_ROQ_CNT 0x118a
+#define mmCPF_EDC_ROQ_CNT_BASE_IDX 0
+#define mmCPG_EDC_TAG_CNT 0x118b
+#define mmCPG_EDC_TAG_CNT_BASE_IDX 0
+#define mmCPG_EDC_DMA_CNT 0x118d
+#define mmCPG_EDC_DMA_CNT_BASE_IDX 0
+#define mmCPC_EDC_SCRATCH_CNT 0x118e
+#define mmCPC_EDC_SCRATCH_CNT_BASE_IDX 0
+#define mmCPC_EDC_UCODE_CNT 0x118f
+#define mmCPC_EDC_UCODE_CNT_BASE_IDX 0
+#define mmDC_EDC_STATE_CNT 0x1191
+#define mmDC_EDC_STATE_CNT_BASE_IDX 0
+#define mmDC_EDC_CSINVOC_CNT 0x1192
+#define mmDC_EDC_CSINVOC_CNT_BASE_IDX 0
+#define mmDC_EDC_RESTORE_CNT 0x1193
+#define mmDC_EDC_RESTORE_CNT_BASE_IDX 0
#define mmCP_GFX_MQD_CONTROL 0x11a0
#define mmCP_GFX_MQD_CONTROL_BASE_IDX 0
#define mmCP_GFX_MQD_BASE_ADDR 0x11a1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h
index 8c75669eb500..9470ec5e0f42 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h
@@ -54,5 +54,8 @@
#define smnPCIE_PERF_COUNT0_TXCLK2 0x11180258
#define smnPCIE_PERF_COUNT1_TXCLK2 0x1118025c
+#define smnPCIE_RX_NUM_NAK 0x11180038
+#define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c
+
#endif // _nbio_6_1_SMN_HEADER
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h
index 5563f0715896..caf5ffdc130a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h
@@ -51,4 +51,7 @@
#define smnPCIE_PERF_COUNT0_TXCLK2 0x11180258
#define smnPCIE_PERF_COUNT1_TXCLK2 0x1118025c
+#define smnPCIE_RX_NUM_NAK 0x11180038
+#define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c
+
#endif // _nbio_7_0_SMN_HEADER
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
index c1457d880c4d..4bcacf529852 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
@@ -50,4 +50,7 @@
#define smnPCIE_PERF_CNTL_EVENT_LC_PORT_SEL 0x1118024c
#define smnPCIE_PERF_CNTL_EVENT_CI_PORT_SEL 0x11180250
+#define smnPCIE_RX_NUM_NAK 0x11180038
+#define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c
+
#endif // _nbio_7_4_0_SMN_HEADER
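The same two PCIe NAK counters are added at identical SMN offsets for NBIO 6.1, 7.0, and 7.4, so a single accessor can serve all three generations; the sum of NAKs received and generated is the figure amdgpu reports as the PCIe replay count. A hedged sketch (smn_read() stands in for the per-ASIC read helper):

#include <stdint.h>

#define smnPCIE_RX_NUM_NAK           0x11180038
#define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c

extern uint32_t smn_read(uint32_t addr);	/* hypothetical */

static uint32_t pcie_replay_count(void)
{
	return smn_read(smnPCIE_RX_NUM_NAK) +
	       smn_read(smnPCIE_RX_NUM_NAK_GENERATED);
}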
diff --git a/drivers/gpu/drm/amd/include/cik_structs.h b/drivers/gpu/drm/amd/include/cik_structs.h
index 749eab94e335..699e658c3cec 100644
--- a/drivers/gpu/drm/amd/include/cik_structs.h
+++ b/drivers/gpu/drm/amd/include/cik_structs.h
@@ -282,6 +282,7 @@ struct cik_sdma_rlc_registers {
uint32_t reserved_123;
uint32_t reserved_124;
uint32_t reserved_125;
- uint32_t reserved_126;
- uint32_t reserved_127;
+ /* reserved_126,127: repurposed for driver-internal use */
+ uint32_t sdma_engine_id;
+ uint32_t sdma_queue_id;
};
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index b897aca9b4c9..98b9533e672b 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -174,6 +174,7 @@ struct tile_config {
#define ALLOC_MEM_FLAGS_GTT (1 << 1)
#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
+#define ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
/*
* Allocation flags attributes/access options.
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 2b579ba9b685..9f661bf96ed0 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -109,8 +109,12 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_UVD_DCLK,
AMDGPU_PP_SENSOR_VCE_ECCLK,
AMDGPU_PP_SENSOR_GPU_LOAD,
+ AMDGPU_PP_SENSOR_MEM_LOAD,
AMDGPU_PP_SENSOR_GFX_MCLK,
AMDGPU_PP_SENSOR_GPU_TEMP,
+ AMDGPU_PP_SENSOR_EDGE_TEMP = AMDGPU_PP_SENSOR_GPU_TEMP,
+ AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ AMDGPU_PP_SENSOR_MEM_TEMP,
AMDGPU_PP_SENSOR_VCE_POWER,
AMDGPU_PP_SENSOR_UVD_POWER,
AMDGPU_PP_SENSOR_GPU_POWER,
@@ -159,6 +163,13 @@ struct pp_states_info {
uint32_t states[16];
};
+enum PP_HWMON_TEMP {
+ PP_TEMP_EDGE = 0,
+ PP_TEMP_JUNCTION,
+ PP_TEMP_MEM,
+ PP_TEMP_MAX
+};
+
#define PP_GROUP_MASK 0xF0000000
#define PP_GROUP_SHIFT 28
diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h
index ceaf4932258d..8b383dbe1cda 100644
--- a/drivers/gpu/drm/amd/include/v9_structs.h
+++ b/drivers/gpu/drm/amd/include/v9_structs.h
@@ -151,6 +151,7 @@ struct v9_sdma_mqd {
uint32_t reserved_123;
uint32_t reserved_124;
uint32_t reserved_125;
- uint32_t reserved_126;
- uint32_t reserved_127;
+ /* reserved_126,127: repurposed for driver-internal use */
+ uint32_t sdma_engine_id;
+ uint32_t sdma_queue_id;
};
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
index 717fbae1d362..c17613287cd0 100644
--- a/drivers/gpu/drm/amd/include/vi_structs.h
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -151,6 +151,7 @@ struct vi_sdma_mqd {
uint32_t reserved_123;
uint32_t reserved_124;
uint32_t reserved_125;
- uint32_t reserved_126;
- uint32_t reserved_127;
+ /* reserved_126,127: repurposed for driver-internal use */
+ uint32_t sdma_engine_id;
+ uint32_t sdma_queue_id;
};
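All three SDMA MQD variants (CIK, VI, GFX9) make the same swap: the last two reserved dwords become sdma_engine_id and sdma_queue_id, so the structure size and every hardware-visible offset are preserved. A sketch of the invariant worth asserting, trimmed to the tail of the struct:

#include <stddef.h>
#include <stdint.h>

struct sdma_mqd_tail_sketch {
	uint32_t reserved_125;
	uint32_t sdma_engine_id;	/* was reserved_126 */
	uint32_t sdma_queue_id;		/* was reserved_127 */
};

_Static_assert(offsetof(struct sdma_mqd_tail_sketch, sdma_queue_id) ==
	       offsetof(struct sdma_mqd_tail_sketch, reserved_125) + 8,
	       "renamed dwords must keep their original offsets");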
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index eec329ab6037..9c67adee2c9e 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -20,9 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
+#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
@@ -30,6 +30,36 @@
#include "atom.h"
#include "amd_pcie.h"
+int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
+{
+ int ret = 0;
+
+ if (!if_version && !smu_version)
+ return -EINVAL;
+
+ if (if_version) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
+ if (ret)
+ return ret;
+
+ ret = smu_read_smc_arg(smu, if_version);
+ if (ret)
+ return ret;
+ }
+
+ if (smu_version) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
+ if (ret)
+ return ret;
+
+ ret = smu_read_smc_arg(smu, smu_version);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
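smu_get_smc_version() above treats each output pointer as optional and only rejects the degenerate call that asks for nothing, so callers can fetch just the half they need. A reduced stand-in showing the same contract (the version values are hypothetical):

#include <errno.h>
#include <stdint.h>

static int get_versions_sketch(uint32_t *if_version, uint32_t *smu_version)
{
	if (!if_version && !smu_version)
		return -EINVAL;			/* caller asked for nothing */
	if (if_version)
		*if_version = 0x13;		/* hypothetical driver-IF version */
	if (smu_version)
		*smu_version = 0x002A3200;	/* hypothetical firmware version */
	return 0;
}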
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
@@ -168,6 +198,8 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
int ret = 0;
+ if (!smu->pm_enabled)
+ return -EINVAL;
if (header->usStructureSize != size) {
pr_err("pp table size not matched !\n");
return -EIO;
@@ -203,6 +235,8 @@ int smu_feature_init_dpm(struct smu_context *smu)
int ret = 0;
uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
+ if (!smu->pm_enabled)
+ return ret;
mutex_lock(&feature->mutex);
bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
@@ -314,6 +348,7 @@ static int smu_early_init(void *handle)
struct smu_context *smu = &adev->smu;
smu->adev = adev;
+ smu->pm_enabled = !!amdgpu_dpm;
mutex_init(&smu->mutex);
return smu_set_funcs(adev);
@@ -323,6 +358,9 @@ static int smu_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
+
+ if (!smu->pm_enabled)
+ return 0;
mutex_lock(&smu->mutex);
smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level,
@@ -406,9 +444,6 @@ static int smu_sw_init(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
-
smu->pool_size = adev->pm.smu_prv_buffer_size;
smu->smu_feature.feature_num = SMU_FEATURE_MAX;
mutex_init(&smu->smu_feature.mutex);
@@ -460,9 +495,6 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
-
ret = smu_smc_table_sw_fini(smu);
if (ret) {
pr_err("Failed to sw fini smc table!\n");
@@ -612,10 +644,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
* check if the format_revision in vbios is up to pptable header
* version, and the structure size is not 0.
*/
- ret = smu_get_clk_info_from_vbios(smu);
- if (ret)
- return ret;
-
ret = smu_check_pptable(smu);
if (ret)
return ret;
@@ -716,6 +744,9 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
*/
ret = smu_set_tool_table_location(smu);
+ if (!smu_is_dpm_running(smu))
+ pr_info("dpm has been disabled\n");
+
return ret;
}
@@ -788,9 +819,6 @@ static int smu_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
-
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
ret = smu_load_microcode(smu);
if (ret)
@@ -831,7 +859,10 @@ static int smu_hw_init(void *handle)
mutex_unlock(&smu->mutex);
- adev->pm.dpm_enabled = true;
+ if (!smu->pm_enabled)
+ adev->pm.dpm_enabled = false;
+ else
+ adev->pm.dpm_enabled = true;
pr_info("SMU is initialized successfully!\n");
@@ -849,9 +880,6 @@ static int smu_hw_fini(void *handle)
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
-
kfree(table_context->driver_pptable);
table_context->driver_pptable = NULL;
@@ -906,9 +934,6 @@ static int smu_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
-
ret = smu_system_features_control(smu, false);
if (ret)
return ret;
@@ -924,9 +949,6 @@ static int smu_resume(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- if (!is_support_sw_smu(adev))
- return -EINVAL;
-
pr_info("SMU is resuming...\n");
mutex_lock(&smu->mutex);
@@ -955,7 +977,7 @@ int smu_display_configuration_change(struct smu_context *smu,
int index = 0;
int num_of_active_display = 0;
- if (!is_support_sw_smu(smu->adev))
+ if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
return -EINVAL;
if (!display_config)
@@ -1083,7 +1105,7 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1126,6 +1148,8 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
long workload;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ if (!smu->pm_enabled)
+ return -EINVAL;
if (!skip_display_settings) {
ret = smu_display_config_changed(smu);
if (ret) {
@@ -1134,6 +1158,8 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
+ if (!smu->pm_enabled)
+ return -EINVAL;
ret = smu_apply_clocks_adjust_rules(smu);
if (ret) {
pr_err("Failed to apply clocks adjust rules!");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 70f7f47a2fcf..cc57fb953e62 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -225,7 +225,16 @@ int phm_register_irq_handlers(struct pp_hwmgr *hwmgr)
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- struct PP_TemperatureRange range = {TEMP_RANGE_MIN, TEMP_RANGE_MAX};
+ struct PP_TemperatureRange range = {
+ TEMP_RANGE_MIN,
+ TEMP_RANGE_MAX,
+ TEMP_RANGE_MAX,
+ TEMP_RANGE_MIN,
+ TEMP_RANGE_MAX,
+ TEMP_RANGE_MAX,
+ TEMP_RANGE_MIN,
+ TEMP_RANGE_MAX,
+ TEMP_RANGE_MAX};
struct amdgpu_device *adev = hwmgr->adev;
if (hwmgr->hwmgr_func->get_thermal_temperature_range)
@@ -239,6 +248,13 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
adev->pm.dpm.thermal.min_temp = range.min;
adev->pm.dpm.thermal.max_temp = range.max;
+ adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
+ adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
+ adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
+ adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
+ adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
+ adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
+ adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 048757e8f494..c5986d28fbf1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <drm/amdgpu_drm.h>
@@ -3532,9 +3533,12 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
- AverageGraphicsActivity);
+ (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
+ AverageGraphicsActivity:
+ AverageMemoryActivity);
activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
activity_percent += 0x80;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 101c09b212ad..d09690fca452 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -20,6 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
+#include <linux/pci.h>
+
#include "hwmgr.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 384c37875cd0..3be8eb21fd6e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include "hwmgr.h"
@@ -356,6 +357,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = hwmgr->backend;
int i;
uint32_t sub_vendor_id, hw_revision;
+ uint32_t top32, bottom32;
struct amdgpu_device *adev = hwmgr->adev;
vega10_initialize_power_tune_defaults(hwmgr);
@@ -499,6 +501,14 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
(hw_revision == 0) &&
(sub_vendor_id != 0x1002))
data->smu_features[GNLD_PCC_LIMIT].supported = true;
+
+ /* Get the SN to turn into a Unique ID */
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
+ top32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
+ bottom32 = smum_get_argument(hwmgr);
+
+ adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
#ifdef PPLIB_VEGA10_EVV_SUPPORT
@@ -2267,8 +2277,8 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
- pp_table->AcgAvfsGb.m1_shift = 0;
- pp_table->AcgAvfsGb.m2_shift = 0;
+ pp_table->AcgAvfsGb.m1_shift = 24;
+ pp_table->AcgAvfsGb.m2_shift = 12;
pp_table->AcgAvfsGb.b_shift = 0;
} else {
@@ -2364,6 +2374,10 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_AVFS].supported) {
+ /* Already enabled or disabled */
+ if (!(enable ^ data->smu_features[GNLD_AVFS].enabled))
+ return 0;
+
if (enable) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true,
@@ -2466,11 +2480,6 @@ static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
return;
}
}
-
- if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
- data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
- }
}
/**
@@ -3683,6 +3692,10 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
vega10_update_avfs(hwmgr);
+ /*
+ * Clear all OD flags except DPMTABLE_OD_UPDATE_VDDC.
+ * That will help to keep AVFS disabled.
+ */
data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
return 0;
@@ -3785,6 +3798,18 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
+ *((uint32_t *)value) = smum_get_argument(hwmgr) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
+ *((uint32_t *)value) = smum_get_argument(hwmgr) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_UVD_POWER:
*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
*size = 4;
@@ -4852,12 +4877,22 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
- struct phm_ppt_v2_information *table_info =
- (struct phm_ppt_v2_information *)hwmgr->pptable;
+ struct vega10_hwmgr *data = hwmgr->backend;
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
- thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
+ thermal_data->max = pp_table->TedgeLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->mem_crit_max = pp_table->ThbmLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
@@ -4988,13 +5023,70 @@ static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
return true;
}
+static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct pp_power_state *ps = hwmgr->request_ps;
+ struct vega10_power_state *vega10_ps;
+ struct vega10_single_dpm_table *gfx_dpm_table =
+ &data->dpm_table.gfx_table;
+ struct vega10_single_dpm_table *soc_dpm_table =
+ &data->dpm_table.soc_table;
+ struct vega10_single_dpm_table *mem_dpm_table =
+ &data->dpm_table.mem_table;
+ int max_level;
+
+ if (!ps)
+ return;
+
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+ max_level = vega10_ps->performance_level_count - 1;
+
+ if (vega10_ps->performance_levels[max_level].gfx_clock !=
+ gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].gfx_clock =
+ gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
+
+ if (vega10_ps->performance_levels[max_level].soc_clock !=
+ soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].soc_clock =
+ soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
+
+ if (vega10_ps->performance_levels[max_level].mem_clock !=
+ mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].mem_clock =
+ mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
+
+ if (!hwmgr->ps)
+ return;
+
+ ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+ max_level = vega10_ps->performance_level_count - 1;
+
+ if (vega10_ps->performance_levels[max_level].gfx_clock !=
+ gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].gfx_clock =
+ gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
+
+ if (vega10_ps->performance_levels[max_level].soc_clock !=
+ soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].soc_clock =
+ soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
+
+ if (vega10_ps->performance_levels[max_level].mem_clock !=
+ mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].mem_clock =
+ mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
+}
+
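The second half of vega10_odn_update_power_state() walks to the last entry of hwmgr->ps, a flat buffer of variable-sized power states; hence the byte arithmetic with ps_size rather than plain array indexing. The pattern in isolation (a hedged sketch, not the driver's helper):

#include <stddef.h>
#include <stdint.h>

/* nth entry of a flat buffer holding states of state_size bytes each */
static void *nth_state(void *base, size_t state_size, size_t n)
{
	return (uint8_t *)base + state_size * n;
}

/* last state: nth_state(hwmgr->ps, hwmgr->ps_size, hwmgr->num_ps - 1) */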
static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info = hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
- struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table;
+ struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.mem_table;
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
&data->odn_dpm_table.vdd_dep_on_socclk;
@@ -5018,7 +5110,8 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
break;
}
if (j == od_vddc_lookup_table->count) {
- od_vddc_lookup_table->entries[j-1].us_vdd =
+ j = od_vddc_lookup_table->count - 1;
+ od_vddc_lookup_table->entries[j].us_vdd =
podn_vdd_dep->entries[i].vddc;
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
}
@@ -5026,25 +5119,38 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
}
dpm_table = &data->dpm_table.soc_table;
for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd &&
- dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) {
+ if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[podn_vdd_dep->count-1].vddInd &&
+ dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count-1].clk) {
data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
- podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
- dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
+ for (; (i < dep_table->count) &&
+ (dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk); i++) {
+ podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[podn_vdd_dep->count-1].clk;
+ dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
+ }
+ break;
+ } else {
+ dpm_table->dpm_levels[i].value = dep_table->entries[i].clk;
+ podn_vdd_dep_on_socclk->entries[i].vddc = dep_table->entries[i].vddc;
+ podn_vdd_dep_on_socclk->entries[i].vddInd = dep_table->entries[i].vddInd;
+ podn_vdd_dep_on_socclk->entries[i].clk = dep_table->entries[i].clk;
}
}
if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
- podn_vdd_dep->entries[dep_table->count-1].clk) {
+ podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk) {
data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
- podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
- dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk;
+ podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk =
+ podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
+ dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value =
+ podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
}
if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
- podn_vdd_dep->entries[dep_table->count-1].vddInd) {
+ podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd) {
data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
- podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd;
+ podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd =
+ podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd;
}
}
+ vega10_odn_update_power_state(hwmgr);
}
static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
@@ -5079,6 +5185,11 @@ static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
vega10_odn_initial_default_setting(hwmgr);
+ vega10_odn_update_power_state(hwmgr);
+ /* force to update all clock tables */
+ data->need_update_dpm_table = DPMTABLE_UPDATE_SCLK |
+ DPMTABLE_UPDATE_MCLK |
+ DPMTABLE_UPDATE_SOCCLK;
return 0;
} else if (PP_OD_COMMIT_DPM_TABLE == type) {
vega10_check_dpm_table_updated(hwmgr);
@@ -5201,8 +5312,12 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
+
hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
hwmgr->pptable_func = &vega10_pptable_funcs;
+ if (amdgpu_passthrough(adev))
+ return vega10_baco_set_cap(hwmgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index b6767d74dc85..f29af5ca0aa0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -21,6 +21,7 @@
*
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fb.h>
@@ -1371,3 +1372,27 @@ int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
return result;
}
+
+int vega10_baco_set_cap(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+
+ const ATOM_Vega10_POWERPLAYTABLE *powerplay_table;
+
+ powerplay_table = get_powerplay_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((powerplay_table != NULL),
+ "Missing PowerPlay Table!", return -1);
+
+ result = check_powerplay_tables(hwmgr, powerplay_table);
+
+ PP_ASSERT_WITH_CODE((result == 0),
+ "check_powerplay_tables failed", return result);
+
+ set_hw_cap(
+ hwmgr,
+ 0 != (le32_to_cpu(powerplay_table->ulPlatformCaps) & ATOM_VEGA10_PP_PLATFORM_CAP_BACO),
+ PHM_PlatformCaps_BACO);
+ return result;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
index d83ed2af7aa3..da5fbec9b0cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
@@ -59,4 +59,5 @@ extern int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
extern int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
struct pp_power_state *, void *, uint32_t));
+extern int vega10_baco_set_cap(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 707cd4b0357f..efb6d3762feb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -289,6 +289,8 @@ static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t top32, bottom32;
int i;
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
@@ -353,6 +355,14 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
((data->registry_data.disallowed_features >> i) & 1) ?
false : true;
}
+
+ /* Get the SN to turn into a Unique ID */
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
+ top32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
+ bottom32 = smum_get_argument(hwmgr);
+
+ adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
@@ -1237,21 +1247,39 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
return (mem_clk * 100);
}
+static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
+ ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
+ TABLE_SMU_METRICS, true);
+ if (ret) {
+ pr_info("Failed to export SMU metrics table!\n");
+ return ret;
+ }
+ memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
+ data->metrics_time = jiffies;
+ } else
+ memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
+
+ return ret;
+}
+
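vega12_get_metrics_table() rate-limits SMU traffic: the table is fetched at most once every HZ/2 jiffies (500 ms) and otherwise served from data->metrics_table, so several back-to-back sensor reads cost one SMU transaction. The staleness test in isolation (a sketch; the kernel's time_after() is open-coded here to stay self-contained, and the signed subtraction keeps it wraparound-safe):

#include <stdbool.h>

static bool cache_stale(unsigned long now, unsigned long stamp, unsigned long hz)
{
	if (stamp == 0)					/* cache never filled */
		return true;
	return (long)(now - (stamp + hz / 2)) > 0;	/* older than 500 ms */
}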
static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
{
-#if 0
- uint32_t value;
+ SmuMetrics_t metrics_table;
+ int ret = 0;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrPkgPwr),
- "Failed to get current package power!",
- return -EINVAL);
+ ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+ if (ret)
+ return ret;
- value = smum_get_argument(hwmgr);
- /* power value is an integer */
- *query = value << 8;
-#endif
- return 0;
+ *query = metrics_table.CurrSocketPower << 8;
+
+ return ret;
}
static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
@@ -1290,25 +1318,27 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
static int vega12_get_current_activity_percent(
struct pp_hwmgr *hwmgr,
+ int idx,
uint32_t *activity_percent)
{
+ SmuMetrics_t metrics_table;
int ret = 0;
- uint32_t current_activity = 50;
-#if 0
- ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
- if (!ret) {
- current_activity = smum_get_argument(hwmgr);
- if (current_activity > 100) {
- PP_ASSERT(false,
- "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
- current_activity = 100;
- }
- } else
- PP_ASSERT(false,
- "[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
-#endif
- *activity_percent = current_activity;
+ ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+ if (ret)
+ return ret;
+
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ *activity_percent = metrics_table.AverageGfxActivity;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ *activity_percent = metrics_table.AverageUclkActivity;
+ break;
+ default:
+ pr_err("Invalid index for retrieving clock activity\n");
+ return -EINVAL;
+ }
return ret;
}
@@ -1317,6 +1347,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ SmuMetrics_t metrics_table;
int ret = 0;
switch (idx) {
@@ -1331,7 +1362,8 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = vega12_get_current_activity_percent(hwmgr, (uint32_t *)value);
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ ret = vega12_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
if (!ret)
*size = 4;
break;
@@ -1339,6 +1371,24 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = vega12_thermal_get_temperature(hwmgr);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+ if (ret)
+ return ret;
+
+ *((uint32_t *)value) = metrics_table.TemperatureHotspot *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+ if (ret)
+ return ret;
+
+ *((uint32_t *)value) = metrics_table.TemperatureHBM *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_UVD_POWER:
*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
*size = 4;
@@ -1349,6 +1399,8 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
+ if (!ret)
+ *size = 4;
break;
case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
ret = vega12_get_enabled_smc_features(hwmgr, (uint64_t *)value);
@@ -2526,12 +2578,23 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
- struct phm_ppt_v3_information *pptable_information =
- (struct phm_ppt_v3_information *)hwmgr->pptable;
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
- thermal_data->max = pptable_information->us_software_shutdown_temp *
+ thermal_data->max = pp_table->TedgeLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->mem_crit_max = pp_table->ThbmLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index b3e424d28994..73875399666a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -396,6 +396,9 @@ struct vega12_hwmgr {
/* ---- Gfxoff ---- */
bool gfxoff_controlled_by_driver;
+
+ unsigned long metrics_time;
+ SmuMetrics_t metrics_table;
};
#define VEGA12_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 9b9f87b84910..f27c6fbb192e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -97,6 +97,27 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
if (hwmgr->smu_version < 0x282100)
data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+ if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK))
+ data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK;
+
+ if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK))
+ data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK;
+
+ if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK))
+ data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK;
+
+ if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK))
+ data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK;
+
+ if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK))
+ data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK;
+
+ if (!(hwmgr->feature_mask & PP_ULV_MASK))
+ data->registry_data.disallowed_features |= FEATURE_ULV_MASK;
+
+ if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK))
+ data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK;
+
data->registry_data.od_state_in_dc_support = 0;
data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
@@ -303,6 +324,8 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t top32, bottom32;
int i;
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
@@ -372,6 +395,14 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
((data->registry_data.disallowed_features >> i) & 1) ?
false : true;
}
+
+ /* Get the SN to turn into a Unique ID */
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
+ top32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
+ bottom32 = smum_get_argument(hwmgr);
+
+ adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
@@ -2094,6 +2125,7 @@ static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
}
static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
+ int idx,
uint32_t *activity_percent)
{
int ret = 0;
@@ -2103,7 +2135,17 @@ static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- *activity_percent = metrics_table.AverageGfxActivity;
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ *activity_percent = metrics_table.AverageGfxActivity;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ *activity_percent = metrics_table.AverageUclkActivity;
+ break;
+ default:
+ pr_err("Invalid index for retrieving clock activity\n");
+ return -EINVAL;
+ }
return ret;
}
@@ -2134,14 +2176,33 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = vega20_get_current_activity_percent(hwmgr, (uint32_t *)value);
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
if (!ret)
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_TEMP:
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
*((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+ if (ret)
+ return ret;
+
+ *((uint32_t *)value) = metrics_table.TemperatureEdge *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+ if (ret)
+ return ret;
+
+ *((uint32_t *)value) = metrics_table.TemperatureHBM *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_UVD_POWER:
*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
*size = 4;
@@ -3974,12 +4035,23 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
- struct phm_ppt_v3_information *pptable_information =
- (struct phm_ppt_v3_information *)hwmgr->pptable;
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
- thermal_data->max = pptable_information->us_software_shutdown_temp *
+ thermal_data->max = pp_table->TedgeLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->mem_crit_max = pp_table->ThbmLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index c8b168b3413b..3eb1de9ecf73 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -401,8 +401,12 @@ struct smu_context
uint32_t workload_setting[WORKLOAD_POLICY_MAX];
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
+ bool pm_enabled;
uint32_t smc_if_version;
+
+ unsigned long metrics_time;
+ void *metrics_table;
};
struct pptable_funcs {
@@ -458,6 +462,8 @@ struct pptable_funcs {
uint32_t *mclk_mask,
uint32_t *soc_mask);
int (*set_cpu_power_state)(struct smu_context *smu);
+ int (*set_ppfeature_status)(struct smu_context *smu, uint64_t ppfeatures);
+ int (*get_ppfeature_status)(struct smu_context *smu, char *buf);
};
struct smu_funcs
@@ -727,7 +733,10 @@ struct smu_funcs
((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0)
#define smu_set_xgmi_pstate(smu, pstate) \
((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
-
+#define smu_set_ppfeature_status(smu, ppfeatures) \
+ ((smu)->ppt_funcs->set_ppfeature_status ? (smu)->ppt_funcs->set_ppfeature_status((smu), (ppfeatures)) : -EINVAL)
+#define smu_get_ppfeature_status(smu, buf) \
+ ((smu)->ppt_funcs->get_ppfeature_status ? (smu)->ppt_funcs->get_ppfeature_status((smu), (buf)) : -EINVAL)
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
uint16_t *size, uint8_t *frev, uint8_t *crev,
@@ -767,4 +776,5 @@ extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, b
extern int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
enum amd_pp_task task_id);
+int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index a99b5cbb113e..a5f2227a3971 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -124,6 +124,13 @@ struct PP_StateSoftwareAlgorithmBlock {
struct PP_TemperatureRange {
int min;
int max;
+ int edge_emergency_max;
+ int hotspot_min;
+ int hotspot_crit_max;
+ int hotspot_emergency_max;
+ int mem_min;
+ int mem_crit_max;
+ int mem_emergency_max;
};
struct PP_StateValidationBlock {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
index 201d2b6329ab..3e30768f9e1c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
@@ -27,14 +27,18 @@
static const struct PP_TemperatureRange SMU7ThermalWithDelayPolicy[] =
{
- {-273150, 99000},
- { 120000, 120000},
+ {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
+ { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
static const struct PP_TemperatureRange SMU7ThermalPolicy[] =
{
- {-273150, 99000},
- { 120000, 120000},
+ {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
+ { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
+#define CTF_OFFSET_EDGE 5
+#define CTF_OFFSET_HOTSPOT 5
+#define CTF_OFFSET_HBM 5
+
#endif
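PP_TemperatureRange now has nine members, and the tables above initialize them positionally, so the value order must track the struct declaration exactly (min, max, edge_emergency_max, hotspot_min, hotspot_crit_max, hotspot_emergency_max, mem_min, mem_crit_max, mem_emergency_max). An equivalent entry with designated initializers, which would survive future reordering (a sketch, not a proposed change):

static const struct PP_TemperatureRange default_range_sketch = {
	.min                   = -273150,
	.max                   = 99000,
	.edge_emergency_max    = 99000,
	.hotspot_min           = -273150,
	.hotspot_crit_max      = 99000,
	.hotspot_emergency_max = 99000,
	.mem_min               = -273150,
	.mem_crit_max          = 99000,
	.mem_emergency_max     = 99000,
};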
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index aa8d81f4111e..02c965d64256 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -36,6 +36,9 @@
#define smnMP0_FW_INTF 0x30101c0
#define smnMP1_PUB_CTRL 0x3010b14
+#define TEMP_RANGE_MIN (0)
+#define TEMP_RANGE_MAX (80 * 1000)
+
struct smu_11_0_max_sustainable_clocks {
uint32_t display_clock;
uint32_t phy_clock;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 82550a8a3a3f..c5288831aa15 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -41,6 +41,7 @@ enum SMU_MEMBER {
HandshakeDisables = 0,
VoltageChangeTimeout,
AverageGraphicsActivity,
+ AverageMemoryActivity,
PreVBlankGap,
VBlankTimeout,
UcodeLoadStatus,
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 92903a4cc4d8..463275f88e89 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -20,8 +20,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
@@ -223,20 +225,27 @@ static int smu_v11_0_check_fw_status(struct smu_context *smu)
static int smu_v11_0_check_fw_version(struct smu_context *smu)
{
- uint32_t smu_version = 0xff;
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ uint16_t smu_major;
+ uint8_t smu_minor, smu_debug;
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
+ ret = smu_get_smc_version(smu, &if_version, &smu_version);
if (ret)
- goto err;
+ return ret;
- ret = smu_read_smc_arg(smu, &smu_version);
- if (ret)
- goto err;
+ smu_major = (smu_version >> 16) & 0xffff;
+ smu_minor = (smu_version >> 8) & 0xff;
+ smu_debug = (smu_version >> 0) & 0xff;
- if (smu_version != smu->smc_if_version)
+ pr_info("SMU Driver IF Version = 0x%08x, SMU FW Version = 0x%08x (%d.%d.%d)\n",
+ if_version, smu_version, smu_major, smu_minor, smu_debug);
+
+ if (if_version != smu->smc_if_version) {
+ pr_err("SMU driver if version not matched\n");
ret = -EINVAL;
-err:
+ }
+
return ret;
}
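The firmware version word unpacked above carries major in bits 31:16, minor in 15:8, and a debug/patch level in 7:0. A self-contained decode of a hypothetical value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t smu_version = 0x002A3200;	/* hypothetical example */
	uint16_t major = (smu_version >> 16) & 0xffff;
	uint8_t  minor = (smu_version >> 8) & 0xff;
	uint8_t  debug = smu_version & 0xff;

	printf("%u.%u.%u\n", major, minor, debug);	/* 42.50.0 */
	return 0;
}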
@@ -353,6 +362,8 @@ static int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
+ if (!smu->pm_enabled)
+ return 0;
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
@@ -362,6 +373,13 @@ static int smu_v11_0_init_power(struct smu_context *smu)
return -ENOMEM;
smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
+ smu->metrics_time = 0;
+ smu->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu->metrics_table) {
+ kfree(smu_power->power_context);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -369,10 +387,14 @@ static int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
+ if (!smu->pm_enabled)
+ return 0;
if (!smu_power->power_context || smu_power->power_context_size == 0)
return -EINVAL;
+ kfree(smu->metrics_table);
kfree(smu_power->power_context);
+ smu->metrics_table = NULL;
smu_power->power_context = NULL;
smu_power->power_context_size = 0;
@@ -634,6 +656,8 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
+ if (!smu->pm_enabled)
+ return 0;
if (!table_context)
return -EINVAL;
@@ -662,6 +686,9 @@ static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
static int smu_v11_0_init_display(struct smu_context *smu)
{
int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
return ret;
}
@@ -671,6 +698,8 @@ static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32
uint32_t feature_low = 0, feature_high = 0;
int ret = 0;
+ if (!smu->pm_enabled)
+ return ret;
if (feature_id >= 0 && feature_id < 31)
feature_low = (1 << feature_id);
else if (feature_id > 31 && feature_id < 63)
@@ -777,10 +806,13 @@ static int smu_v11_0_system_features_control(struct smu_context *smu,
uint32_t feature_mask[2];
int ret = 0;
- ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures));
- if (ret)
- return ret;
+ if (smu->pm_enabled) {
+ ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures));
+ if (ret)
+ return ret;
+ }
+
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
return ret;
@@ -797,6 +829,8 @@ static int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
+ if (!smu->pm_enabled)
+ return ret;
if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
@@ -809,6 +843,8 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
{
int ret = 0;
+ if (!smu->pm_enabled)
+ return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
clock_select << 16);
if (ret) {
@@ -995,9 +1031,20 @@ static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_
static int smu_v11_0_get_thermal_range(struct smu_context *smu,
struct PP_TemperatureRange *range)
{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
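+	/* Critical limits come straight from the powerplay table; the
+	 * emergency (CTF) limits sit a fixed CTF_OFFSET_* above them.
+	 */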
- range->max = smu->smu_table.software_shutdown_temp *
+ range->max = pptable->TedgeLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->hotspot_crit_max = pptable->ThotspotLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ range->mem_crit_max = pptable->ThbmLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
@@ -1062,9 +1109,20 @@ static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
static int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
int ret = 0;
- struct PP_TemperatureRange range;
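+	/* Conservative defaults; smu_v11_0_get_thermal_range() below
+	 * overwrites them with the pptable-provided limits.
+	 */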
+ struct PP_TemperatureRange range = {
+ TEMP_RANGE_MIN,
+ TEMP_RANGE_MAX,
+ TEMP_RANGE_MAX,
+ TEMP_RANGE_MIN,
+ TEMP_RANGE_MAX,
+ TEMP_RANGE_MAX,
+ TEMP_RANGE_MIN,
+ TEMP_RANGE_MAX,
+ TEMP_RANGE_MAX};
struct amdgpu_device *adev = smu->adev;
+ if (!smu->pm_enabled)
+ return ret;
smu_v11_0_get_thermal_range(smu, &range);
if (smu->smu_table.thermal_controller_type) {
@@ -1082,11 +1140,39 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
adev->pm.dpm.thermal.min_temp = range.min;
adev->pm.dpm.thermal.max_temp = range.max;
+ adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
+ adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
+ adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
+ adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
+ adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
+ adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
+ adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
+
+ return ret;
+}
+
+static int smu_v11_0_get_metrics_table(struct smu_context *smu,
+ SmuMetrics_t *metrics_table)
+{
+ int ret = 0;
+
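+	/* Serve the cached metrics unless they are older than roughly one
+	 * millisecond (HZ / 1000 jiffies); only then re-read from the SMU.
+	 */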
+ if (!smu->metrics_time || time_after(jiffies, smu->metrics_time + HZ / 1000)) {
+ ret = smu_update_table(smu, TABLE_SMU_METRICS,
+ (void *)metrics_table, false);
+ if (ret) {
+ pr_info("Failed to export SMU metrics table!\n");
+ return ret;
+ }
+ memcpy(smu->metrics_table, metrics_table, sizeof(SmuMetrics_t));
+ smu->metrics_time = jiffies;
+	} else {
+		memcpy(metrics_table, smu->metrics_table, sizeof(SmuMetrics_t));
+	}
+
+	return ret;
+}
static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
uint32_t *value)
{
int ret = 0;
@@ -1095,31 +1181,64 @@ static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
if (!value)
return -EINVAL;
- ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
+ ret = smu_v11_0_get_metrics_table(smu, &metrics);
if (ret)
return ret;
- *value = metrics.AverageGfxActivity;
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ *value = metrics.AverageGfxActivity;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ *value = metrics.AverageUclkActivity;
+ break;
+ default:
+ pr_err("Invalid sensor for retrieving clock activity\n");
+ return -EINVAL;
+ }
return 0;
}
-static int smu_v11_0_thermal_get_temperature(struct smu_context *smu, uint32_t *value)
+static int smu_v11_0_thermal_get_temperature(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
{
struct amdgpu_device *adev = smu->adev;
+ SmuMetrics_t metrics;
uint32_t temp = 0;
+ int ret = 0;
if (!value)
return -EINVAL;
- temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
- temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
- CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
+ ret = smu_v11_0_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
- temp = temp & 0x1ff;
- temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
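+	/* The hotspot temperature is read live from the THM register block;
+	 * edge and HBM temperatures come from the cached metrics table.
+	 */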
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
+ temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
- *value = temp;
+ temp = temp & 0x1ff;
+ temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ *value = temp;
+ break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ *value = metrics.TemperatureEdge *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ *value = metrics.TemperatureHBM *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ default:
+ pr_err("Invalid sensor for retrieving temp\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -1132,7 +1251,7 @@ static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value)
if (!value)
return -EINVAL;
- ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
+ ret = smu_v11_0_get_metrics_table(smu, &metrics);
if (ret)
return ret;
@@ -1174,7 +1293,9 @@ static int smu_v11_0_read_sensor(struct smu_context *smu,
int ret = 0;
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
ret = smu_v11_0_get_current_activity_percent(smu,
+ sensor,
(uint32_t *)data);
*size = 4;
break;
@@ -1186,8 +1307,10 @@ static int smu_v11_0_read_sensor(struct smu_context *smu,
ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_TEMP:
- ret = smu_v11_0_thermal_get_temperature(smu, (uint32_t *)data);
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v11_0_thermal_get_temperature(smu, sensor, (uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
@@ -1235,6 +1358,8 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
PPCLK_e clk_select = 0;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
+ if (!smu->pm_enabled)
+ return -EINVAL;
if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
switch (clk_type) {
case amd_pp_dcef_clock:
@@ -1518,7 +1643,7 @@ static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
"PD_Data_error_rate_coeff"};
int result = 0;
- if (!buf)
+ if (!smu->pm_enabled || !buf)
return -EINVAL;
size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
@@ -1605,6 +1730,8 @@ static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input
smu->power_profile_mode = input[size];
+ if (!smu->pm_enabled)
+ return ret;
if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
return -EINVAL;
@@ -1710,24 +1837,24 @@ static int smu_v11_0_update_od8_settings(struct smu_context *smu,
static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
{
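+	/* This path must drive FEATURE_DPM_UVD_BIT; the VCE bit is handled
+	 * by smu_v11_0_dpm_set_vce_enable() below.
+	 */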
- if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
+ if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
return 0;
- if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
+ if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
return 0;
- return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
+ return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
}
static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
{
- if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
+ if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
return 0;
- if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
+ if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
return 0;
- return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
+ return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
}
static int smu_v11_0_get_current_rpm(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 669bd0c2a16c..7184d39dcbee 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -25,6 +25,7 @@
#include <linux/fb.h>
#include "linux/delay.h"
#include <linux/types.h>
+#include <linux/pci.h>
#include "smumgr.h"
#include "pp_debug.h"
@@ -2254,6 +2255,8 @@ static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
case AverageGraphicsActivity:
return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
+ case AverageMemoryActivity:
+ return offsetof(SMU7_SoftRegisters, AverageMemoryA);
case PreVBlankGap:
return offsetof(SMU7_SoftRegisters, PreVBlankGap);
case VBlankTimeout:
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index bc8375cbf297..0ce85b73338e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -2304,6 +2304,8 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
case AverageGraphicsActivity:
return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
+ case AverageMemoryActivity:
+ return offsetof(SMU73_SoftRegisters, AverageMemoryActivity);
case PreVBlankGap:
return offsetof(SMU73_SoftRegisters, PreVBlankGap);
case VBlankTimeout:
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 375ccf6ff5f2..73091ac0b647 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -25,6 +25,7 @@
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/gfp.h>
@@ -2219,6 +2220,8 @@ static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
case AverageGraphicsActivity:
return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+ case AverageMemoryActivity:
+ return offsetof(SMU71_SoftRegisters, AverageMemoryActivity);
case PreVBlankGap:
return offsetof(SMU71_SoftRegisters, PreVBlankGap);
case VBlankTimeout:
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 2d4cfe14f72e..d6052e6daef2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -21,6 +21,8 @@
*
*/
+#include <linux/pci.h>
+
#include "pp_debug.h"
#include "smumgr.h"
#include "smu74.h"
@@ -2313,6 +2315,8 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
case AverageGraphicsActivity:
return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+ case AverageMemoryActivity:
+ return offsetof(SMU74_SoftRegisters, AverageMemoryActivity);
case PreVBlankGap:
return offsetof(SMU74_SoftRegisters, PreVBlankGap);
case VBlankTimeout:
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 6d11076a79ba..d409925d1f7d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -21,6 +21,8 @@
*
*/
+#include <linux/pci.h>
+
#include "smumgr.h"
#include "smu10_inc.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 3ed6c5f1e5cf..e4e976b9d64e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -23,6 +23,7 @@
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/gfp.h>
@@ -2611,6 +2612,8 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
case AverageGraphicsActivity:
return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
+ case AverageMemoryActivity:
+ return offsetof(SMU72_SoftRegisters, AverageMemoryActivity);
case PreVBlankGap:
return offsetof(SMU72_SoftRegisters, PreVBlankGap);
case VBlankTimeout:
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index c81acc3192ad..672986e9eecb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -21,6 +21,8 @@
*
*/
+#include <linux/pci.h>
+
#include "smumgr.h"
#include "vega10_inc.h"
#include "soc15_common.h"
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index ddb801517667..1eaf0fa28ef7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -287,8 +287,26 @@ static int vega12_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01;
priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t);
+ /* allocate space for SMU_METRICS table */
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ sizeof(SmuMetrics_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
+ &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
+ &priv->smu_tables.entry[TABLE_SMU_METRICS].table);
+ if (ret)
+ goto err4;
+
+ priv->smu_tables.entry[TABLE_SMU_METRICS].version = 0x01;
+ priv->smu_tables.entry[TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t);
+
return 0;
+err4:
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
+ &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
+ &priv->smu_tables.entry[TABLE_OVERDRIVE].table);
err3:
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].handle,
&priv->smu_tables.entry[TABLE_AVFS_FUSE_OVERRIDE].mc_addr,
@@ -334,6 +352,9 @@ static int vega12_smu_fini(struct pp_hwmgr *hwmgr)
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
+ amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
+ &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
+ &priv->smu_tables.entry[TABLE_SMU_METRICS].table);
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 1e69300f6175..d499204b2184 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -2167,6 +2167,8 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
return offsetof(SMU75_SoftRegisters, VoltageChangeTimeout);
case AverageGraphicsActivity:
return offsetof(SMU75_SoftRegisters, AverageGraphicsActivity);
+ case AverageMemoryActivity:
+ return offsetof(SMU75_SoftRegisters, AverageMemoryActivity);
case PreVBlankGap:
return offsetof(SMU75_SoftRegisters, PreVBlankGap);
case VBlankTimeout:
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 8fafcbdb1dfd..4aa8f5a69c4c 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -2374,6 +2374,157 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
return ret;
}
+static int vega20_get_enabled_smc_features(struct smu_context *smu,
+ uint64_t *features_enabled)
+{
+ uint32_t feature_mask[2] = {0, 0};
+ int ret = 0;
+
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
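+	/* Combine the two 32-bit SMC feature words into one 64-bit mask:
+	 * word 0 maps to bits 0-31, word 1 to bits 32-63.
+	 */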
+ *features_enabled = ((((uint64_t)feature_mask[0] << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
+ (((uint64_t)feature_mask[1] << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
+
+ return ret;
+}
+
+static int vega20_enable_smc_features(struct smu_context *smu,
+ bool enable, uint64_t feature_mask)
+{
+ uint32_t smu_features_low, smu_features_high;
+ int ret = 0;
+
+ smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
+ smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
+
+ if (enable) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ smu_features_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ smu_features_high);
+ if (ret)
+ return ret;
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ smu_features_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ smu_features_high);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vega20_get_ppfeature_status(struct smu_context *smu, char *buf)
+{
+ static const char *ppfeature_name[] = {
+ "DPM_PREFETCHER",
+ "GFXCLK_DPM",
+ "UCLK_DPM",
+ "SOCCLK_DPM",
+ "UVD_DPM",
+ "VCE_DPM",
+ "ULV",
+ "MP0CLK_DPM",
+ "LINK_DPM",
+ "DCEFCLK_DPM",
+ "GFXCLK_DS",
+ "SOCCLK_DS",
+ "LCLK_DS",
+ "PPT",
+ "TDC",
+ "THERMAL",
+ "GFX_PER_CU_CG",
+ "RM",
+ "DCEFCLK_DS",
+ "ACDC",
+ "VR0HOT",
+ "VR1HOT",
+ "FW_CTF",
+ "LED_DISPLAY",
+ "FAN_CONTROL",
+ "GFX_EDC",
+ "GFXOFF",
+ "CG",
+ "FCLK_DPM",
+ "FCLK_DS",
+ "MP1CLK_DS",
+ "MP0CLK_DS",
+ "XGMI",
+ "ECC"};
+ static const char *output_title[] = {
+ "FEATURES",
+ "BITMASK",
+ "ENABLEMENT"};
+ uint64_t features_enabled;
+ int i;
+ int ret = 0;
+ int size = 0;
+
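+	/* ppfeature_name[] must stay in step with the GNLD_* enum in
+	 * vega20_ppt.h; one line is printed per feature bit below.
+	 */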
+ ret = vega20_get_enabled_smc_features(smu, &features_enabled);
+ if (ret)
+ return ret;
+
+ size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
+ size += sprintf(buf + size, "%-19s %-22s %s\n",
+ output_title[0],
+ output_title[1],
+ output_title[2]);
+ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
+ size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
+ ppfeature_name[i],
+ 1ULL << i,
+ (features_enabled & (1ULL << i)) ? "Y" : "N");
+ }
+
+ return size;
+}
+
+static int vega20_set_ppfeature_status(struct smu_context *smu, uint64_t new_ppfeature_masks)
+{
+ uint64_t features_enabled;
+ uint64_t features_to_enable;
+ uint64_t features_to_disable;
+ int ret = 0;
+
+ if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
+ return -EINVAL;
+
+ ret = vega20_get_enabled_smc_features(smu, &features_enabled);
+ if (ret)
+ return ret;
+
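+	/* Diff the requested mask against the current state: bits set only
+	 * in the old mask get disabled, bits set only in the new one enabled.
+	 */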
+ features_to_disable =
+ features_enabled & ~new_ppfeature_masks;
+ features_to_enable =
+ ~features_enabled & new_ppfeature_masks;
+
+ pr_debug("features_to_disable 0x%llx\n", features_to_disable);
+ pr_debug("features_to_enable 0x%llx\n", features_to_enable);
+
+ if (features_to_disable) {
+ ret = vega20_enable_smc_features(smu, false, features_to_disable);
+ if (ret)
+ return ret;
+ }
+
+ if (features_to_enable) {
+ ret = vega20_enable_smc_features(smu, true, features_to_enable);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs vega20_ppt_funcs = {
.alloc_dpm_context = vega20_allocate_dpm_context,
.store_powerplay_table = vega20_store_powerplay_table,
@@ -2404,6 +2555,8 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.unforce_dpm_levels = vega20_unforce_dpm_levels,
.upload_dpm_level = vega20_upload_dpm_level,
.get_profiling_clk_mask = vega20_get_profiling_clk_mask,
+ .set_ppfeature_status = vega20_set_ppfeature_status,
+ .get_ppfeature_status = vega20_get_ppfeature_status,
};
void vega20_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.h b/drivers/gpu/drm/amd/powerplay/vega20_ppt.h
index 5a0d2af63173..87f3a8303645 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.h
@@ -36,6 +36,50 @@
#define AVFS_CURVE 0
#define OD8_HOTCURVE_TEMPERATURE 85
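+/* The 64-bit SMC feature mask is exchanged with the SMU as two 32-bit words. */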
+#define SMU_FEATURES_LOW_MASK 0x00000000FFFFFFFF
+#define SMU_FEATURES_LOW_SHIFT 0
+#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000
+#define SMU_FEATURES_HIGH_SHIFT 32
+
+enum {
+ GNLD_DPM_PREFETCHER = 0,
+ GNLD_DPM_GFXCLK,
+ GNLD_DPM_UCLK,
+ GNLD_DPM_SOCCLK,
+ GNLD_DPM_UVD,
+ GNLD_DPM_VCE,
+ GNLD_ULV,
+ GNLD_DPM_MP0CLK,
+ GNLD_DPM_LINK,
+ GNLD_DPM_DCEFCLK,
+ GNLD_DS_GFXCLK,
+ GNLD_DS_SOCCLK,
+ GNLD_DS_LCLK,
+ GNLD_PPT,
+ GNLD_TDC,
+ GNLD_THERMAL,
+ GNLD_GFX_PER_CU_CG,
+ GNLD_RM,
+ GNLD_DS_DCEFCLK,
+ GNLD_ACDC,
+ GNLD_VR0HOT,
+ GNLD_VR1HOT,
+ GNLD_FW_CTF,
+ GNLD_LED_DISPLAY,
+ GNLD_FAN_CONTROL,
+ GNLD_DIDT,
+ GNLD_GFXOFF,
+ GNLD_CG,
+ GNLD_DPM_FCLK,
+ GNLD_DS_FCLK,
+ GNLD_DS_MP1CLK,
+ GNLD_DS_MP0CLK,
+ GNLD_XGMI,
+ GNLD_ECC,
+
+ GNLD_FEATURES_MAX
+};
+
struct vega20_dpm_level {
bool enabled;
uint32_t value;
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index eb33a811fd4a..db4451260fff 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -459,23 +459,6 @@ static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc)
return &state->base;
}
-static void malidp_crtc_reset(struct drm_crtc *crtc)
-{
- struct malidp_crtc_state *state = NULL;
-
- if (crtc->state) {
- state = to_malidp_crtc_state(crtc->state);
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
- }
-
- kfree(state);
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (state) {
- crtc->state = &state->base;
- crtc->state->crtc = crtc;
- }
-}
-
static void malidp_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -489,6 +472,17 @@ static void malidp_crtc_destroy_state(struct drm_crtc *crtc,
kfree(mali_state);
}
+static void malidp_crtc_reset(struct drm_crtc *crtc)
+{
+ struct malidp_crtc_state *state =
+ kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (crtc->state)
+ malidp_crtc_destroy_state(crtc, crtc->state);
+
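+	/* If the allocation failed, &state->base evaluates to NULL (base is
+	 * assumed to be the first member), which the helper tolerates.
+	 */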
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+}
+
static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 5aa7b81ba4c9..50af399d7f6f 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -378,7 +378,8 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
int malidp_format_get_bpp(u32 fmt)
{
- int bpp = drm_format_plane_cpp(fmt, 0) * 8;
+ const struct drm_format_info *info = drm_format_info(fmt);
+ int bpp = info->cpp[0] * 8;
if (bpp == 0) {
switch (fmt) {
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 5f102bdaf841..2e812525025d 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -158,7 +158,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
- n_planes = drm_format_num_planes(fb->format->format);
+ n_planes = fb->format->num_planes;
for (i = 0; i < n_planes; i++) {
struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, i);
/* memory write buffers are never rotated */
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 062e88e238dd..488375bd133d 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -223,14 +223,13 @@ bool malidp_format_mod_supported(struct drm_device *drm,
if (modifier & AFBC_SPLIT) {
if (!info->is_yuv) {
- if (drm_format_plane_cpp(format, 0) <= 2) {
+ if (info->cpp[0] <= 2) {
DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n");
return false;
}
}
- if ((drm_format_horz_chroma_subsampling(format) != 1) ||
- (drm_format_vert_chroma_subsampling(format) != 1)) {
+ if ((info->hsub != 1) || (info->vsub != 1)) {
if (!(format == DRM_FORMAT_YUV420_10BIT &&
(map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) {
DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n");
@@ -240,8 +239,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
}
if (modifier & AFBC_CBR) {
- if ((drm_format_horz_chroma_subsampling(format) == 1) ||
- (drm_format_vert_chroma_subsampling(format) == 1)) {
+ if ((info->hsub == 1) || (info->vsub == 1)) {
DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n");
return false;
}
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 058ac7d9920f..a2f6472eb482 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -87,6 +87,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
{
+ const struct drm_format_info *info = drm_get_format_info(dev, mode);
struct armada_gem_object *obj;
struct armada_framebuffer *dfb;
int ret;
@@ -97,7 +98,7 @@ struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
mode->pitches[2]);
/* We can only handle a single plane at the moment */
- if (drm_format_num_planes(mode->pixel_format) > 1 &&
+ if (info->num_planes > 1 &&
(mode->handles[0] != mode->handles[1] ||
mode->handles[0] != mode->handles[2])) {
ret = -EINVAL;
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index ac47ecfe7801..829620d5326c 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -2,9 +2,8 @@
config DRM_AST
tristate "AST server chips"
depends on DRM && PCI && MMU
- select DRM_TTM
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_VRAM_HELPER
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 3871b39d4dea..3811997e78c4 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -205,13 +205,7 @@ static struct pci_driver ast_pci_driver = {
static const struct file_operations ast_fops = {
.owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = ast_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
- .read = drm_read,
+ DRM_VRAM_MM_FILE_OPERATIONS
};
static struct drm_driver driver = {
@@ -228,10 +222,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object_unlocked = ast_gem_free_object,
- .dumb_create = ast_dumb_create,
- .dumb_map_offset = ast_dumb_mmap_offset,
-
+ DRM_GEM_VRAM_DRIVER
};
static int __init ast_init(void)
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 1cf0c75e411d..684e15e64a62 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -31,13 +31,10 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
-
#include <drm/drm_gem.h>
+#include <drm/drm_gem_vram_helper.h>
+
+#include <drm/drm_vram_mm_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -103,15 +100,7 @@ struct ast_private {
int fb_mtrr;
- struct {
- struct ttm_bo_device bdev;
- } ttm;
-
struct drm_gem_object *cursor_cache;
- uint64_t cursor_cache_gpu_addr;
- /* Acces to this cache is protected by the crtc->mutex of the only crtc
- * we have. */
- struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
enum {
@@ -243,9 +232,6 @@ struct ast_connector {
struct ast_crtc {
struct drm_crtc base;
- struct drm_gem_object *cursor_bo;
- uint64_t cursor_addr;
- int cursor_width, cursor_height;
u8 offset_x, offset_y;
};
@@ -263,7 +249,6 @@ struct ast_fbdev {
struct ast_framebuffer afb;
void *sysram;
int size;
- struct ttm_bo_kmap_obj mapping;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
};
@@ -321,73 +306,16 @@ void ast_fbdev_fini(struct drm_device *dev);
void ast_fbdev_set_suspend(struct drm_device *dev, int state);
void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr);
-struct ast_bo {
- struct ttm_buffer_object bo;
- struct ttm_placement placement;
- struct ttm_bo_kmap_obj kmap;
- struct drm_gem_object gem;
- struct ttm_place placements[3];
- int pin_count;
-};
-#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
-
-static inline struct ast_bo *
-ast_bo(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct ast_bo, bo);
-}
-
-
-#define to_ast_obj(x) container_of(x, struct ast_gem_object, base)
-
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
-extern int ast_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
-extern void ast_gem_free_object(struct drm_gem_object *obj);
-extern int ast_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset);
-
int ast_mm_init(struct ast_private *ast);
void ast_mm_fini(struct ast_private *ast);
-int ast_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct ast_bo **pastbo);
-
int ast_gem_create(struct drm_device *dev,
u32 size, bool iskernel,
struct drm_gem_object **obj);
-int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
-int ast_bo_unpin(struct ast_bo *bo);
-
-static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
-{
- int ret;
-
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
- if (ret) {
- if (ret != -ERESTARTSYS && ret != -EBUSY)
- DRM_ERROR("reserve failed %p\n", bo);
- return ret;
- }
- return 0;
-}
-
-static inline void ast_bo_unreserve(struct ast_bo *bo)
-{
- ttm_bo_unreserve(&bo->bo);
-}
-
-void ast_ttm_placement(struct ast_bo *bo, int domain);
-int ast_bo_push_sysram(struct ast_bo *bo);
-int ast_mmap(struct file *filp, struct vm_area_struct *vma);
-
/* ast post */
void ast_enable_vga(struct drm_device *dev);
void ast_enable_mmio(struct drm_device *dev);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index e718d0f60d6b..8200b25dad16 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -48,30 +48,30 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
int x, int y, int width, int height)
{
int i;
- struct drm_gem_object *obj;
- struct ast_bo *bo;
+ struct drm_gem_vram_object *gbo;
int src_offset, dst_offset;
int bpp = afbdev->afb.base.format->cpp[0];
- int ret = -EBUSY;
+ int ret;
+ u8 *dst;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
- obj = afbdev->afb.obj;
- bo = gem_to_ast_bo(obj);
-
- /*
- * try and reserve the BO, if we fail with busy
- * then the BO is being moved and we should
- * store up the damage until later.
- */
- if (drm_can_sleep())
- ret = ast_bo_reserve(bo, true);
- if (ret) {
- if (ret != -EBUSY)
- return;
+ gbo = drm_gem_vram_of_gem(afbdev->afb.obj);
+ if (drm_can_sleep()) {
+ /* We pin the BO so it won't be moved during the
+ * update. The actual location, video RAM or system
+ * memory, is not important.
+ */
+ ret = drm_gem_vram_pin(gbo, 0);
+ if (ret) {
+ if (ret != -EBUSY)
+ return;
+ store_for_later = true;
+ }
+ } else {
store_for_later = true;
}
@@ -101,25 +101,32 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
afbdev->x2 = afbdev->y2 = 0;
spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
- if (!bo->kmap.virtual) {
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret) {
+ dst = drm_gem_vram_kmap(gbo, false, NULL);
+ if (IS_ERR(dst)) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ goto out;
+ } else if (!dst) {
+ dst = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(dst)) {
DRM_ERROR("failed to kmap fb updates\n");
- ast_bo_unreserve(bo);
- return;
+ goto out;
}
unmap = true;
}
+
for (i = y; i <= y2; i++) {
/* assume equal stride for now */
- src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
-
+ src_offset = dst_offset =
+ i * afbdev->afb.base.pitches[0] + (x * bpp);
+ memcpy_toio(dst + dst_offset, afbdev->sysram + src_offset,
+ (x2 - x + 1) * bpp);
}
+
if (unmap)
- ttm_bo_kunmap(&bo->kmap);
+ drm_gem_vram_kunmap(gbo);
- ast_bo_unreserve(bo);
+out:
+ drm_gem_vram_unpin(gbo);
}
static void ast_fillrect(struct fb_info *info,
@@ -159,8 +166,6 @@ static struct fb_ops astfb_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int astfb_create_object(struct ast_fbdev *afbdev,
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 2854399856ba..4c7e31cb45ff 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -593,7 +593,7 @@ int ast_gem_create(struct drm_device *dev,
u32 size, bool iskernel,
struct drm_gem_object **obj)
{
- struct ast_bo *astbo;
+ struct drm_gem_vram_object *gbo;
int ret;
*obj = NULL;
@@ -602,80 +602,13 @@ int ast_gem_create(struct drm_device *dev,
if (size == 0)
return -EINVAL;
- ret = ast_bo_create(dev, size, 0, 0, &astbo);
- if (ret) {
+ gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
if (ret != -ERESTARTSYS)
DRM_ERROR("failed to allocate GEM object\n");
return ret;
}
- *obj = &astbo->gem;
+ *obj = &gbo->gem;
return 0;
}
-
-int ast_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- int ret;
- struct drm_gem_object *gobj;
- u32 handle;
-
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
- ret = ast_gem_create(dev, args->size, false,
- &gobj);
- if (ret)
- return ret;
-
- ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_put_unlocked(gobj);
- if (ret)
- return ret;
-
- args->handle = handle;
- return 0;
-}
-
-static void ast_bo_unref(struct ast_bo **bo)
-{
- if ((*bo) == NULL)
- return;
- ttm_bo_put(&((*bo)->bo));
- *bo = NULL;
-}
-
-void ast_gem_free_object(struct drm_gem_object *obj)
-{
- struct ast_bo *ast_bo = gem_to_ast_bo(obj);
-
- ast_bo_unref(&ast_bo);
-}
-
-
-static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
-{
- return drm_vma_node_offset_addr(&bo->bo.vma_node);
-}
-int
-ast_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
-{
- struct drm_gem_object *obj;
- struct ast_bo *bo;
-
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL)
- return -ENOENT;
-
- bo = gem_to_ast_bo(obj);
- *offset = ast_bo_mmap_offset(bo);
-
- drm_gem_object_put_unlocked(obj);
-
- return 0;
-
-}
-
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 97fed0627d1c..ffccbef962a4 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -521,7 +521,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-/* ast is different - we will force move buffers out of VRAM */
static int ast_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
@@ -529,50 +528,54 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
struct ast_private *ast = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct ast_framebuffer *ast_fb;
- struct ast_bo *bo;
+ struct drm_gem_vram_object *gbo;
int ret;
- u64 gpu_addr;
+ s64 gpu_addr;
+ void *base;
- /* push the previous fb to system ram */
if (!atomic && fb) {
ast_fb = to_ast_framebuffer(fb);
obj = ast_fb->obj;
- bo = gem_to_ast_bo(obj);
- ret = ast_bo_reserve(bo, false);
- if (ret)
- return ret;
- ast_bo_push_sysram(bo);
- ast_bo_unreserve(bo);
+ gbo = drm_gem_vram_of_gem(obj);
+
+ /* unmap if console */
+ if (&ast->fbdev->afb == ast_fb)
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
}
ast_fb = to_ast_framebuffer(crtc->primary->fb);
obj = ast_fb->obj;
- bo = gem_to_ast_bo(obj);
+ gbo = drm_gem_vram_of_gem(obj);
- ret = ast_bo_reserve(bo, false);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
return ret;
-
- ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- if (ret) {
- ast_bo_unreserve(bo);
- return ret;
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (gpu_addr < 0) {
+ ret = (int)gpu_addr;
+ goto err_drm_gem_vram_unpin;
}
if (&ast->fbdev->afb == ast_fb) {
		/* if pushing the console, kmap it */
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret)
+ base = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
DRM_ERROR("failed to kmap fbcon\n");
- else
+ } else {
ast_fbdev_set_base(ast, gpu_addr);
+ }
}
- ast_bo_unreserve(bo);
ast_set_offset_reg(crtc);
ast_set_start_address_crt1(crtc, (u32)gpu_addr);
return 0;
+
+err_drm_gem_vram_unpin:
+ drm_gem_vram_unpin(gbo);
+ return ret;
}
static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -618,21 +621,18 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
static void ast_crtc_disable(struct drm_crtc *crtc)
{
- int ret;
-
DRM_DEBUG_KMS("\n");
ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
+ struct ast_private *ast = crtc->dev->dev_private;
struct ast_framebuffer *ast_fb = to_ast_framebuffer(crtc->primary->fb);
struct drm_gem_object *obj = ast_fb->obj;
- struct ast_bo *bo = gem_to_ast_bo(obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(obj);
- ret = ast_bo_reserve(bo, false);
- if (ret)
- return;
-
- ast_bo_push_sysram(bo);
- ast_bo_unreserve(bo);
+ /* unmap if console */
+ if (&ast->fbdev->afb == ast_fb)
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
}
crtc->primary->fb = NULL;
}
@@ -918,32 +918,34 @@ static int ast_cursor_init(struct drm_device *dev)
int size;
int ret;
struct drm_gem_object *obj;
- struct ast_bo *bo;
- uint64_t gpu_addr;
+ struct drm_gem_vram_object *gbo;
+ s64 gpu_addr;
+ void *base;
size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
ret = ast_gem_create(dev, size, true, &obj);
if (ret)
return ret;
- bo = gem_to_ast_bo(obj);
- ret = ast_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- goto fail;
-
- ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- ast_bo_unreserve(bo);
+ gbo = drm_gem_vram_of_gem(obj);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
goto fail;
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (gpu_addr < 0) {
+ drm_gem_vram_unpin(gbo);
+ ret = (int)gpu_addr;
+ goto fail;
+ }
/* kmap the object */
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
- if (ret)
+ base = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
goto fail;
+ }
ast->cursor_cache = obj;
- ast->cursor_cache_gpu_addr = gpu_addr;
- DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
return 0;
fail:
return ret;
@@ -952,7 +954,10 @@ fail:
static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- ttm_bo_kunmap(&ast->cache_kmap);
+ struct drm_gem_vram_object *gbo =
+ drm_gem_vram_of_gem(ast->cursor_cache);
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
drm_gem_object_put_unlocked(ast->cursor_cache);
}
@@ -1173,13 +1178,13 @@ static int ast_cursor_set(struct drm_crtc *crtc,
struct ast_private *ast = crtc->dev->dev_private;
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
struct drm_gem_object *obj;
- struct ast_bo *bo;
- uint64_t gpu_addr;
+ struct drm_gem_vram_object *gbo;
+ s64 dst_gpu;
+ u64 gpu_addr;
u32 csum;
int ret;
- struct ttm_bo_kmap_obj uobj_map;
u8 *src, *dst;
- bool src_isiomem, dst_isiomem;
+
if (!handle) {
ast_hide_cursor(crtc);
return 0;
@@ -1193,21 +1198,28 @@ static int ast_cursor_set(struct drm_crtc *crtc,
DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
return -ENOENT;
}
- bo = gem_to_ast_bo(obj);
+ gbo = drm_gem_vram_of_gem(obj);
- ret = ast_bo_reserve(bo, false);
+ ret = drm_gem_vram_pin(gbo, 0);
if (ret)
- goto fail;
-
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
-
- src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
- dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
+ goto err_drm_gem_object_put_unlocked;
+ src = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ goto err_drm_gem_vram_unpin;
+ }
- if (src_isiomem == true)
- DRM_ERROR("src cursor bo should be in main memory\n");
- if (dst_isiomem == false)
- DRM_ERROR("dst bo should be in VRAM\n");
+ dst = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
+ false, NULL);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto err_drm_gem_vram_kunmap;
+ }
+ dst_gpu = drm_gem_vram_offset(drm_gem_vram_of_gem(ast->cursor_cache));
+ if (dst_gpu < 0) {
+ ret = (int)dst_gpu;
+ goto err_drm_gem_vram_kunmap;
+ }
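+	/* The cursor cache holds AST_DEFAULT_HWC_NUM slots; next_cursor
+	 * selects the slot that receives this image.
+	 */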
dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
@@ -1215,10 +1227,11 @@ static int ast_cursor_set(struct drm_crtc *crtc,
csum = copy_cursor_image(src, dst, width, height);
/* write checksum + signature */
- ttm_bo_kunmap(&uobj_map);
- ast_bo_unreserve(bo);
{
- u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ struct drm_gem_vram_object *dst_gbo =
+ drm_gem_vram_of_gem(ast->cursor_cache);
+ u8 *dst = drm_gem_vram_kmap(dst_gbo, false, NULL);
+ dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
writel(csum, dst);
writel(width, dst + AST_HWC_SIGNATURE_SizeX);
writel(height, dst + AST_HWC_SIGNATURE_SizeY);
@@ -1226,15 +1239,13 @@ static int ast_cursor_set(struct drm_crtc *crtc,
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
/* set pattern offset */
- gpu_addr = ast->cursor_cache_gpu_addr;
+ gpu_addr = (u64)dst_gpu;
gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
gpu_addr >>= 3;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
}
- ast_crtc->cursor_width = width;
- ast_crtc->cursor_height = height;
ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height;
@@ -1242,9 +1253,17 @@ static int ast_cursor_set(struct drm_crtc *crtc,
ast_show_cursor(crtc);
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
drm_gem_object_put_unlocked(obj);
+
return 0;
-fail:
+
+err_drm_gem_vram_kunmap:
+ drm_gem_vram_kunmap(gbo);
+err_drm_gem_vram_unpin:
+ drm_gem_vram_unpin(gbo);
+err_drm_gem_object_put_unlocked:
drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -1257,7 +1276,9 @@ static int ast_cursor_move(struct drm_crtc *crtc,
int x_offset, y_offset;
u8 *sig;
- sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ sig = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
+ false, NULL);
+ sig += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 75d477b37854..779c53efee8e 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -26,168 +26,21 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include "ast_drv.h"
-static inline struct ast_private *
-ast_bdev(struct ttm_bo_device *bd)
-{
- return container_of(bd, struct ast_private, ttm.bdev);
-}
-
-static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
-{
- struct ast_bo *bo;
-
- bo = container_of(tbo, struct ast_bo, bo);
-
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
-{
- if (bo->destroy == &ast_bo_ttm_destroy)
- return true;
- return false;
-}
-
-static int
-ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
-}
-
-static void
-ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
-{
- struct ast_bo *astbo = ast_bo(bo);
-
- if (!ast_ttm_bo_is_ast_bo(bo))
- return;
-
- ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
- *pl = astbo->placement;
-}
-
-static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct ast_bo *astbo = ast_bo(bo);
-
- return drm_vma_node_verify_access(&astbo->gem.vma_node,
- filp->private_data);
-}
-
-static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct ast_private *ast = ast_bdev(bdev);
-
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- /* system memory */
- return 0;
- case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
- mem->bus.is_iomem = true;
- break;
- default:
- return -EINVAL;
- break;
- }
- return 0;
-}
-
-static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
-{
-}
-
-static void ast_ttm_backend_destroy(struct ttm_tt *tt)
-{
- ttm_tt_fini(tt);
- kfree(tt);
-}
-
-static struct ttm_backend_func ast_tt_backend_func = {
- .destroy = &ast_ttm_backend_destroy,
-};
-
-
-static struct ttm_tt *ast_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct ttm_tt *tt;
-
- tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
- if (tt == NULL)
- return NULL;
- tt->func = &ast_tt_backend_func;
- if (ttm_tt_init(tt, bo, page_flags)) {
- kfree(tt);
- return NULL;
- }
- return tt;
-}
-
-struct ttm_bo_driver ast_bo_driver = {
- .ttm_tt_create = ast_ttm_tt_create,
- .init_mem_type = ast_bo_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = ast_bo_evict_flags,
- .move = NULL,
- .verify_access = ast_bo_verify_access,
- .io_mem_reserve = &ast_ttm_io_mem_reserve,
- .io_mem_free = &ast_ttm_io_mem_free,
-};
-
int ast_mm_init(struct ast_private *ast)
{
+ struct drm_vram_mm *vmm;
int ret;
struct drm_device *dev = ast->dev;
- struct ttm_bo_device *bdev = &ast->ttm.bdev;
-
- ret = ttm_bo_device_init(&ast->ttm.bdev,
- &ast_bo_driver,
- dev->anon_inode->i_mapping,
- true);
- if (ret) {
- DRM_ERROR("Error initialising bo driver; %d\n", ret);
- return ret;
- }
- ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
- ast->vram_size >> PAGE_SHIFT);
- if (ret) {
- DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ vmm = drm_vram_helper_alloc_mm(
+ dev, pci_resource_start(dev->pdev, 0),
+ ast->vram_size, &drm_gem_vram_mm_funcs);
+ if (IS_ERR(vmm)) {
+ ret = PTR_ERR(vmm);
+		DRM_ERROR("Error initializing VRAM MM: %d\n", ret);
return ret;
}
@@ -203,148 +56,9 @@ void ast_mm_fini(struct ast_private *ast)
{
struct drm_device *dev = ast->dev;
- ttm_bo_device_release(&ast->ttm.bdev);
+ drm_vram_helper_release_mm(dev);
arch_phys_wc_del(ast->fb_mtrr);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
}
-
-void ast_ttm_placement(struct ast_bo *bo, int domain)
-{
- u32 c = 0;
- unsigned i;
-
- bo->placement.placement = bo->placements;
- bo->placement.busy_placement = bo->placements;
- if (domain & TTM_PL_FLAG_VRAM)
- bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
- if (domain & TTM_PL_FLAG_SYSTEM)
- bo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
- if (!c)
- bo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
- bo->placement.num_placement = c;
- bo->placement.num_busy_placement = c;
- for (i = 0; i < c; ++i) {
- bo->placements[i].fpfn = 0;
- bo->placements[i].lpfn = 0;
- }
-}
-
-int ast_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct ast_bo **pastbo)
-{
- struct ast_private *ast = dev->dev_private;
- struct ast_bo *astbo;
- size_t acc_size;
- int ret;
-
- astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
- if (!astbo)
- return -ENOMEM;
-
- ret = drm_gem_object_init(dev, &astbo->gem, size);
- if (ret)
- goto error;
-
- astbo->bo.bdev = &ast->ttm.bdev;
-
- ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
-
- acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
- sizeof(struct ast_bo));
-
- ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
- ttm_bo_type_device, &astbo->placement,
- align >> PAGE_SHIFT, false, acc_size,
- NULL, NULL, ast_bo_ttm_destroy);
- if (ret)
- goto error;
-
- *pastbo = astbo;
- return 0;
-error:
- kfree(astbo);
- return ret;
-}
-
-static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
-{
- return bo->bo.offset;
-}
-
-int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- if (bo->pin_count) {
- bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = ast_bo_gpu_offset(bo);
- }
-
- ast_ttm_placement(bo, pl_flag);
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret)
- return ret;
-
- bo->pin_count = 1;
- if (gpu_addr)
- *gpu_addr = ast_bo_gpu_offset(bo);
- return 0;
-}
-
-int ast_bo_unpin(struct ast_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i;
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
-
- for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
-}
-
-int ast_bo_push_sysram(struct ast_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
-
- if (bo->kmap.virtual)
- ttm_bo_kunmap(&bo->kmap);
-
- ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
- for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret) {
- DRM_ERROR("pushing to VRAM failed\n");
- return ret;
- }
- return 0;
-}
-
-int ast_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct ast_private *ast = file_priv->minor->dev->dev_private;
-
- return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
-}
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 2362f07fe1fc..2a413e291a60 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -32,9 +32,12 @@
*/
#include <linux/export.h>
-#include <drm/drmP.h>
#include <drm/ati_pcigart.h>
+#include <drm/drm_device.h>
+#include <drm/drm_os_linux.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_print.h>
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 8070a558d7b1..81c50772df05 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -78,7 +78,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
unsigned long mode_rate;
struct videomode vm;
unsigned long prate;
- unsigned int cfg;
+ unsigned int mask = ATMEL_HLCDC_CLKDIV_MASK | ATMEL_HLCDC_CLKPOL;
+ unsigned int cfg = 0;
int div;
vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
@@ -101,7 +102,10 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
(adj->crtc_hdisplay - 1) |
((adj->crtc_vdisplay - 1) << 16));
- cfg = ATMEL_HLCDC_CLKSEL;
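+	/* Only touch the clock-source selection bit on variants that allow
+	 * switching; fixed-clksrc parts such as sam9x60 keep CLKSEL clear.
+	 */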
+ if (!crtc->dc->desc->fixed_clksrc) {
+ cfg |= ATMEL_HLCDC_CLKSEL;
+ mask |= ATMEL_HLCDC_CLKSEL;
+ }
prate = 2 * clk_get_rate(crtc->dc->hlcdc->sys_clk);
mode_rate = adj->crtc_clock * 1000;
@@ -132,11 +136,10 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
cfg |= ATMEL_HLCDC_CLKDIV(div);
- regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0),
- ATMEL_HLCDC_CLKSEL | ATMEL_HLCDC_CLKDIV_MASK |
- ATMEL_HLCDC_CLKPOL, cfg);
+ regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0), mask, cfg);
- cfg = 0;
+ state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);
+ cfg = state->output_mode << 8;
if (adj->flags & DRM_MODE_FLAG_NVSYNC)
cfg |= ATMEL_HLCDC_VSPOL;
@@ -144,9 +147,6 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
if (adj->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= ATMEL_HLCDC_HSPOL;
- state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);
- cfg |= state->output_mode << 8;
-
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5),
ATMEL_HLCDC_HSPOL | ATMEL_HLCDC_VSPOL |
ATMEL_HLCDC_VSPDLYS | ATMEL_HLCDC_VSPDLYE |
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 0be13eceedba..fb2e7646daeb 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -364,6 +364,103 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = {
.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers),
.layers = atmel_hlcdc_sama5d4_layers,
};
+
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sam9x60_layers[] = {
+ {
+ .name = "base",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x60,
+ .id = 0,
+ .type = ATMEL_HLCDC_BASE_LAYER,
+ .cfgs_offset = 0x2c,
+ .layout = {
+ .xstride = { 2 },
+ .default_color = 3,
+ .general_config = 4,
+ .disc_pos = 5,
+ .disc_size = 6,
+ },
+ .clut_offset = 0x600,
+ },
+ {
+ .name = "overlay1",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x160,
+ .id = 1,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .cfgs_offset = 0x2c,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .xstride = { 4 },
+ .pstride = { 5 },
+ .default_color = 6,
+ .chroma_key = 7,
+ .chroma_key_mask = 8,
+ .general_config = 9,
+ },
+ .clut_offset = 0xa00,
+ },
+ {
+ .name = "overlay2",
+ .formats = &atmel_hlcdc_plane_rgb_formats,
+ .regs_offset = 0x260,
+ .id = 2,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .cfgs_offset = 0x2c,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .xstride = { 4 },
+ .pstride = { 5 },
+ .default_color = 6,
+ .chroma_key = 7,
+ .chroma_key_mask = 8,
+ .general_config = 9,
+ },
+ .clut_offset = 0xe00,
+ },
+ {
+ .name = "high-end-overlay",
+ .formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
+ .regs_offset = 0x360,
+ .id = 3,
+ .type = ATMEL_HLCDC_OVERLAY_LAYER,
+ .cfgs_offset = 0x4c,
+ .layout = {
+ .pos = 2,
+ .size = 3,
+ .memsize = 4,
+ .xstride = { 5, 7 },
+ .pstride = { 6, 8 },
+ .default_color = 9,
+ .chroma_key = 10,
+ .chroma_key_mask = 11,
+ .general_config = 12,
+ .scaler_config = 13,
+ .phicoeffs = {
+ .x = 17,
+ .y = 33,
+ },
+ .csc = 14,
+ },
+ .clut_offset = 0x1200,
+ },
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sam9x60 = {
+ .min_width = 0,
+ .min_height = 0,
+ .max_width = 2048,
+ .max_height = 2048,
+ .max_spw = 0xff,
+ .max_vpw = 0xff,
+ .max_hpw = 0x3ff,
+ .fixed_clksrc = true,
+ .nlayers = ARRAY_SIZE(atmel_hlcdc_sam9x60_layers),
+ .layers = atmel_hlcdc_sam9x60_layers,
+};
+
static const struct of_device_id atmel_hlcdc_of_match[] = {
{
.compatible = "atmel,at91sam9n12-hlcdc",
@@ -385,6 +482,10 @@ static const struct of_device_id atmel_hlcdc_of_match[] = {
.compatible = "atmel,sama5d4-hlcdc",
.data = &atmel_hlcdc_dc_sama5d4,
},
+ {
+ .compatible = "microchip,sam9x60-hlcdc",
+ .data = &atmel_hlcdc_dc_sam9x60,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_hlcdc_of_match);
@@ -625,10 +726,18 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
dc->hlcdc = dev_get_drvdata(dev->dev->parent);
dev->dev_private = dc;
+ if (dc->desc->fixed_clksrc) {
+ ret = clk_prepare_enable(dc->hlcdc->sys_clk);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable sys_clk\n");
+ goto err_destroy_wq;
+ }
+ }
+
ret = clk_prepare_enable(dc->hlcdc->periph_clk);
if (ret) {
dev_err(dev->dev, "failed to enable periph_clk\n");
- goto err_destroy_wq;
+ goto err_sys_clk_disable;
}
pm_runtime_enable(dev->dev);
@@ -664,6 +773,9 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
err_periph_clk_disable:
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
+err_sys_clk_disable:
+ if (dc->desc->fixed_clksrc)
+ clk_disable_unprepare(dc->hlcdc->sys_clk);
err_destroy_wq:
destroy_workqueue(dc->wq);
@@ -688,6 +800,8 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
+ if (dc->desc->fixed_clksrc)
+ clk_disable_unprepare(dc->hlcdc->sys_clk);
destroy_workqueue(dc->wq);
}
@@ -805,6 +919,8 @@ static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
regmap_read(regmap, ATMEL_HLCDC_IMR, &dc->suspend.imr);
regmap_write(regmap, ATMEL_HLCDC_IDR, dc->suspend.imr);
clk_disable_unprepare(dc->hlcdc->periph_clk);
+ if (dc->desc->fixed_clksrc)
+ clk_disable_unprepare(dc->hlcdc->sys_clk);
return 0;
}
@@ -814,6 +930,8 @@ static int atmel_hlcdc_dc_drm_resume(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct atmel_hlcdc_dc *dc = drm_dev->dev_private;
+ if (dc->desc->fixed_clksrc)
+ clk_prepare_enable(dc->hlcdc->sys_clk);
clk_prepare_enable(dc->hlcdc->periph_clk);
regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, dc->suspend.imr);
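
The load, unload, suspend and resume paths above all gate sys_clk on the new
fixed_clksrc flag; on the load path a failure of periph_clk must unwind the
conditional enable. A condensed sketch of that ordering, assuming the same
clock handles:

/* Sketch: conditional clock enable with matching unwind on failure. */
static int hlcdc_clks_enable(struct atmel_hlcdc_dc *dc)
{
	int ret;

	if (dc->desc->fixed_clksrc) {
		ret = clk_prepare_enable(dc->hlcdc->sys_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(dc->hlcdc->periph_clk);
	if (ret && dc->desc->fixed_clksrc)
		clk_disable_unprepare(dc->hlcdc->sys_clk);	/* unwind */

	return ret;
}
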
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 70bd540d644e..0155efb9c443 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -328,6 +328,7 @@ atmel_hlcdc_layer_to_plane(struct atmel_hlcdc_layer *layer)
* @max_hpw: maximum horizontal back/front porch width
* @conflicting_output_formats: true if RGBXXX output formats conflict with
* each other.
+ * @fixed_clksrc: true if the clock source is fixed (ATMEL_HLCDC_CLKSEL is
+ * never set)
* @layers: a layer description table describing available layers
* @nlayers: layer description table size
*/
@@ -340,6 +341,7 @@ struct atmel_hlcdc_dc_desc {
int max_vpw;
int max_hpw;
bool conflicting_output_formats;
+ bool fixed_clksrc;
const struct atmel_hlcdc_layer_desc *layers;
int nlayers;
};
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index e836e2de35ce..0ee5b7a3a4b0 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -382,7 +382,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
cfg |= ATMEL_HLCDC_LAYER_LAEN;
else
cfg |= ATMEL_HLCDC_LAYER_GAEN |
- ATMEL_HLCDC_LAYER_GA(state->base.alpha >> 8);
+ ATMEL_HLCDC_LAYER_GA(state->base.alpha);
}
if (state->disc_h && state->disc_w)
@@ -603,8 +603,6 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
unsigned int tmp;
- int hsub = 1;
- int vsub = 1;
int ret;
int i;
@@ -642,13 +640,10 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if (state->nplanes > ATMEL_HLCDC_LAYER_MAX_PLANES)
return -EINVAL;
- hsub = drm_format_horz_chroma_subsampling(fb->format->format);
- vsub = drm_format_vert_chroma_subsampling(fb->format->format);
-
for (i = 0; i < state->nplanes; i++) {
unsigned int offset = 0;
- int xdiv = i ? hsub : 1;
- int ydiv = i ? vsub : 1;
+ int xdiv = i ? fb->format->hsub : 1;
+ int ydiv = i ? fb->format->vsub : 1;
state->bpp[i] = fb->format->cpp[i];
if (!state->bpp[i])
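
The removed drm_format_*_chroma_subsampling() helpers are redundant: the same
factors live in the format descriptor as fb->format->hsub and
fb->format->vsub. A short sketch of the per-plane divisor logic the loop
above relies on (plane 0 is never subsampled, hence the i ? ... : 1):

/* Sketch: chroma planes (i > 0) are scaled down by the format's
 * subsampling factors; the first plane is not.
 */
static void plane_dims(const struct drm_framebuffer *fb, int i,
		       unsigned int src_w, unsigned int src_h,
		       unsigned int *w, unsigned int *h)
{
	int xdiv = i ? fb->format->hsub : 1;
	int ydiv = i ? fb->format->vsub : 1;

	*w = src_w / xdiv;	/* e.g. NV12 chroma: hsub = vsub = 2 */
	*h = src_h / ydiv;
}
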
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index 17885fab131d..32b043abb668 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -3,7 +3,7 @@ config DRM_BOCHS
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_VRAM_HELPER
help
Choose this option for qemu.
If M is selected the module will be called bochs-drm.
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 341cc9d1bab4..cc35d492142c 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -10,9 +10,9 @@
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_vram_helper.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/drm_vram_mm_helper.h>
/* ---------------------------------------------------------------------- */
@@ -73,38 +73,8 @@ struct bochs_device {
struct drm_device *dev;
struct drm_simple_display_pipe pipe;
struct drm_connector connector;
-
- /* ttm */
- struct {
- struct ttm_bo_device bdev;
- bool initialized;
- } ttm;
-};
-
-struct bochs_bo {
- struct ttm_buffer_object bo;
- struct ttm_placement placement;
- struct ttm_bo_kmap_obj kmap;
- struct drm_gem_object gem;
- struct ttm_place placements[3];
- int pin_count;
};
-static inline struct bochs_bo *bochs_bo(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct bochs_bo, bo);
-}
-
-static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem)
-{
- return container_of(gem, struct bochs_bo, gem);
-}
-
-static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
-{
- return drm_vma_node_offset_addr(&bo->bo.vma_node);
-}
-
/* ---------------------------------------------------------------------- */
/* bochs_hw.c */
@@ -122,26 +92,6 @@ int bochs_hw_load_edid(struct bochs_device *bochs);
/* bochs_mm.c */
int bochs_mm_init(struct bochs_device *bochs);
void bochs_mm_fini(struct bochs_device *bochs);
-int bochs_mmap(struct file *filp, struct vm_area_struct *vma);
-
-int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
- struct drm_gem_object **obj);
-int bochs_gem_init_object(struct drm_gem_object *obj);
-void bochs_gem_free_object(struct drm_gem_object *obj);
-int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
-
-int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag);
-int bochs_bo_unpin(struct bochs_bo *bo);
-
-int bochs_gem_prime_pin(struct drm_gem_object *obj);
-void bochs_gem_prime_unpin(struct drm_gem_object *obj);
-void *bochs_gem_prime_vmap(struct drm_gem_object *obj);
-void bochs_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int bochs_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
/* bochs_kms.c */
int bochs_kms_init(struct bochs_device *bochs);
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index b86cc705138c..8f3a5bda9d03 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_atomic_helper.h>
#include "bochs.h"
@@ -60,14 +61,7 @@ err:
static const struct file_operations bochs_fops = {
.owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = no_llseek,
- .mmap = bochs_mmap,
+ DRM_VRAM_MM_FILE_OPERATIONS
};
static struct drm_driver bochs_driver = {
@@ -79,17 +73,8 @@ static struct drm_driver bochs_driver = {
.date = "20130925",
.major = 1,
.minor = 0,
- .gem_free_object_unlocked = bochs_gem_free_object,
- .dumb_create = bochs_dumb_create,
- .dumb_map_offset = bochs_dumb_mmap_offset,
-
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_pin = bochs_gem_prime_pin,
- .gem_prime_unpin = bochs_gem_prime_unpin,
- .gem_prime_vmap = bochs_gem_prime_vmap,
- .gem_prime_vunmap = bochs_gem_prime_vunmap,
- .gem_prime_mmap = bochs_gem_prime_mmap,
+ DRM_GEM_VRAM_DRIVER,
+ DRM_GEM_VRAM_DRIVER_PRIME,
};
/* ---------------------------------------------------------------------- */
@@ -171,6 +156,7 @@ static void bochs_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_atomic_helper_shutdown(dev);
drm_dev_unregister(dev);
bochs_unload(dev);
drm_dev_put(dev);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 37e515221ad8..5904eddc83a5 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -27,16 +27,16 @@ static const uint32_t bochs_formats[] = {
static void bochs_plane_update(struct bochs_device *bochs,
struct drm_plane_state *state)
{
- struct bochs_bo *bo;
+ struct drm_gem_vram_object *gbo;
if (!state->fb || !bochs->stride)
return;
- bo = gem_to_bochs_bo(state->fb->obj[0]);
+ gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
bochs_hw_setbase(bochs,
state->crtc_x,
state->crtc_y,
- bo->bo.offset);
+ gbo->bo.offset);
bochs_hw_setformat(bochs, state->fb->format);
}
@@ -69,23 +69,23 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *new_state)
{
- struct bochs_bo *bo;
+ struct drm_gem_vram_object *gbo;
if (!new_state->fb)
return 0;
- bo = gem_to_bochs_bo(new_state->fb->obj[0]);
- return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
+ return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
}
static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
- struct bochs_bo *bo;
+ struct drm_gem_vram_object *gbo;
if (!old_state->fb)
return;
- bo = gem_to_bochs_bo(old_state->fb->obj[0]);
- bochs_bo_unpin(bo);
+ gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]);
+ drm_gem_vram_unpin(gbo);
}
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 807c80f1f024..8f9bb886f7ad 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -4,435 +4,22 @@
#include "bochs.h"
-static void bochs_ttm_placement(struct bochs_bo *bo, int domain);
-
/* ---------------------------------------------------------------------- */
-static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
-{
- return container_of(bd, struct bochs_device, ttm.bdev);
-}
-
-static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
-{
- struct bochs_bo *bo;
-
- bo = container_of(tbo, struct bochs_bo, bo);
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static bool bochs_ttm_bo_is_bochs_bo(struct ttm_buffer_object *bo)
-{
- if (bo->destroy == &bochs_bo_ttm_destroy)
- return true;
- return false;
-}
-
-static int bochs_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
-}
-
-static void
-bochs_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
-{
- struct bochs_bo *bochsbo = bochs_bo(bo);
-
- if (!bochs_ttm_bo_is_bochs_bo(bo))
- return;
-
- bochs_ttm_placement(bochsbo, TTM_PL_FLAG_SYSTEM);
- *pl = bochsbo->placement;
-}
-
-static int bochs_bo_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- struct bochs_bo *bochsbo = bochs_bo(bo);
-
- return drm_vma_node_verify_access(&bochsbo->gem.vma_node,
- filp->private_data);
-}
-
-static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct bochs_device *bochs = bochs_bdev(bdev);
-
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- /* system memory */
- return 0;
- case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = bochs->fb_base;
- mem->bus.is_iomem = true;
- break;
- default:
- return -EINVAL;
- break;
- }
- return 0;
-}
-
-static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
-}
-
-static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
-{
- ttm_tt_fini(tt);
- kfree(tt);
-}
-
-static struct ttm_backend_func bochs_tt_backend_func = {
- .destroy = &bochs_ttm_backend_destroy,
-};
-
-static struct ttm_tt *bochs_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct ttm_tt *tt;
-
- tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
- if (tt == NULL)
- return NULL;
- tt->func = &bochs_tt_backend_func;
- if (ttm_tt_init(tt, bo, page_flags)) {
- kfree(tt);
- return NULL;
- }
- return tt;
-}
-
-static struct ttm_bo_driver bochs_bo_driver = {
- .ttm_tt_create = bochs_ttm_tt_create,
- .init_mem_type = bochs_bo_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = bochs_bo_evict_flags,
- .move = NULL,
- .verify_access = bochs_bo_verify_access,
- .io_mem_reserve = &bochs_ttm_io_mem_reserve,
- .io_mem_free = &bochs_ttm_io_mem_free,
-};
-
int bochs_mm_init(struct bochs_device *bochs)
{
- struct ttm_bo_device *bdev = &bochs->ttm.bdev;
- int ret;
+ struct drm_vram_mm *vmm;
- ret = ttm_bo_device_init(&bochs->ttm.bdev,
- &bochs_bo_driver,
- bochs->dev->anon_inode->i_mapping,
- true);
- if (ret) {
- DRM_ERROR("Error initialising bo driver; %d\n", ret);
- return ret;
- }
-
- ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
- bochs->fb_size >> PAGE_SHIFT);
- if (ret) {
- DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
- return ret;
- }
-
- bochs->ttm.initialized = true;
- return 0;
+ vmm = drm_vram_helper_alloc_mm(bochs->dev, bochs->fb_base,
+ bochs->fb_size,
+ &drm_gem_vram_mm_funcs);
+ return PTR_ERR_OR_ZERO(vmm);
}
void bochs_mm_fini(struct bochs_device *bochs)
{
- if (!bochs->ttm.initialized)
+ if (!bochs->dev->vram_mm)
return;
- ttm_bo_device_release(&bochs->ttm.bdev);
- bochs->ttm.initialized = false;
-}
-
-static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
-{
- unsigned i;
- u32 c = 0;
- bo->placement.placement = bo->placements;
- bo->placement.busy_placement = bo->placements;
- if (domain & TTM_PL_FLAG_VRAM) {
- bo->placements[c++].flags = TTM_PL_FLAG_WC
- | TTM_PL_FLAG_UNCACHED
- | TTM_PL_FLAG_VRAM;
- }
- if (domain & TTM_PL_FLAG_SYSTEM) {
- bo->placements[c++].flags = TTM_PL_MASK_CACHING
- | TTM_PL_FLAG_SYSTEM;
- }
- if (!c) {
- bo->placements[c++].flags = TTM_PL_MASK_CACHING
- | TTM_PL_FLAG_SYSTEM;
- }
- for (i = 0; i < c; ++i) {
- bo->placements[i].fpfn = 0;
- bo->placements[i].lpfn = 0;
- }
- bo->placement.num_placement = c;
- bo->placement.num_busy_placement = c;
-}
-
-int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- if (bo->pin_count) {
- bo->pin_count++;
- return 0;
- }
-
- bochs_ttm_placement(bo, pl_flag);
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
- if (ret)
- return ret;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- ttm_bo_unreserve(&bo->bo);
- if (ret)
- return ret;
-
- bo->pin_count = 1;
- return 0;
-}
-
-int bochs_bo_unpin(struct bochs_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
-
- if (bo->pin_count)
- return 0;
-
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
- if (ret)
- return ret;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- ttm_bo_unreserve(&bo->bo);
- if (ret)
- return ret;
-
- return 0;
-}
-
-int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct bochs_device *bochs = file_priv->minor->dev->dev_private;
-
- return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
-}
-
-/* ---------------------------------------------------------------------- */
-
-static int bochs_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct bochs_bo **pbochsbo)
-{
- struct bochs_device *bochs = dev->dev_private;
- struct bochs_bo *bochsbo;
- size_t acc_size;
- int ret;
-
- bochsbo = kzalloc(sizeof(struct bochs_bo), GFP_KERNEL);
- if (!bochsbo)
- return -ENOMEM;
-
- ret = drm_gem_object_init(dev, &bochsbo->gem, size);
- if (ret) {
- kfree(bochsbo);
- return ret;
- }
-
- bochsbo->bo.bdev = &bochs->ttm.bdev;
- bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
-
- bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
-
- acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size,
- sizeof(struct bochs_bo));
-
- ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
- ttm_bo_type_device, &bochsbo->placement,
- align >> PAGE_SHIFT, false, acc_size,
- NULL, NULL, bochs_bo_ttm_destroy);
- if (ret)
- return ret;
-
- *pbochsbo = bochsbo;
- return 0;
-}
-
-int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
- struct drm_gem_object **obj)
-{
- struct bochs_bo *bochsbo;
- int ret;
-
- *obj = NULL;
-
- size = PAGE_ALIGN(size);
- if (size == 0)
- return -EINVAL;
-
- ret = bochs_bo_create(dev, size, 0, 0, &bochsbo);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
- *obj = &bochsbo->gem;
- return 0;
-}
-
-int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct drm_gem_object *gobj;
- u32 handle;
- int ret;
-
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
- ret = bochs_gem_create(dev, args->size, false,
- &gobj);
- if (ret)
- return ret;
-
- ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_put_unlocked(gobj);
- if (ret)
- return ret;
-
- args->handle = handle;
- return 0;
-}
-
-static void bochs_bo_unref(struct bochs_bo **bo)
-{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
- return;
-
- tbo = &((*bo)->bo);
- ttm_bo_put(tbo);
- *bo = NULL;
-}
-
-void bochs_gem_free_object(struct drm_gem_object *obj)
-{
- struct bochs_bo *bochs_bo = gem_to_bochs_bo(obj);
-
- bochs_bo_unref(&bochs_bo);
-}
-
-int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
-{
- struct drm_gem_object *obj;
- struct bochs_bo *bo;
-
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL)
- return -ENOENT;
-
- bo = gem_to_bochs_bo(obj);
- *offset = bochs_bo_mmap_offset(bo);
-
- drm_gem_object_put_unlocked(obj);
- return 0;
-}
-
-/* ---------------------------------------------------------------------- */
-
-int bochs_gem_prime_pin(struct drm_gem_object *obj)
-{
- struct bochs_bo *bo = gem_to_bochs_bo(obj);
-
- return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
-}
-
-void bochs_gem_prime_unpin(struct drm_gem_object *obj)
-{
- struct bochs_bo *bo = gem_to_bochs_bo(obj);
-
- bochs_bo_unpin(bo);
-}
-
-void *bochs_gem_prime_vmap(struct drm_gem_object *obj)
-{
- struct bochs_bo *bo = gem_to_bochs_bo(obj);
- bool is_iomem;
- int ret;
-
- ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
- if (ret)
- return NULL;
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret) {
- bochs_bo_unpin(bo);
- return NULL;
- }
- return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
-}
-
-void bochs_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
- struct bochs_bo *bo = gem_to_bochs_bo(obj);
-
- ttm_bo_kunmap(&bo->kmap);
- bochs_bo_unpin(bo);
-}
-
-int bochs_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- struct bochs_bo *bo = gem_to_bochs_bo(obj);
-
- bo->gem.vma_node.vm_node.start = bo->bo.vma_node.vm_node.start;
- return drm_gem_prime_mmap(obj, vma);
+ drm_vram_helper_release_mm(bochs->dev);
}
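
After the conversion, buffer-object management reduces to the two helper
calls kept above. A sketch of the full contract, using only the helpers this
patch introduces (drm_vram_helper_alloc_mm() stores the manager in
dev->vram_mm, which is why fini needs no private state):

/* Sketch: VRAM-helper memory management, mirroring the new bochs_mm.c. */
static int vram_mm_init(struct drm_device *dev, u64 base, size_t size)
{
	struct drm_vram_mm *vmm;

	vmm = drm_vram_helper_alloc_mm(dev, base, size,
				       &drm_gem_vram_mm_funcs);
	return PTR_ERR_OR_ZERO(vmm);
}

static void vram_mm_fini(struct drm_device *dev)
{
	if (dev->vram_mm)
		drm_vram_helper_release_mm(dev);
}
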
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 0e3e868850d5..f6d2681f6927 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -5,21 +5,21 @@
* Copyright 2012 Analog Devices Inc.
*/
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
-#include <linux/clk.h>
-#include <drm/drmP.h>
+#include <media/cec.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <media/cec.h>
-
#include "adv7511.h"
/* ADI recommended values for proper operation. */
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index 6bcc36e77c65..3c7cc5af735c 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -7,23 +7,22 @@
*/
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/interrupt.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
-#include <linux/types.h>
-#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
+#include <linux/types.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "analogix-anx78xx.h"
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 3666c308c34a..3f7f4880be09 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -6,27 +6,26 @@
* Author: Jingoo Han <jg1.han@samsung.com>
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
-#include <linux/component.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
-#include <drm/drmP.h>
+#include <drm/bridge/analogix_dp.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/bridge/analogix_dp.h>
-
#include "analogix_dp_core.h"
#include "analogix_dp_reg.h"
@@ -111,7 +110,7 @@ EXPORT_SYMBOL_GPL(analogix_dp_psr_enabled);
int analogix_dp_enable_psr(struct analogix_dp_device *dp)
{
- struct edp_vsc_psr psr_vsc;
+ struct dp_sdp psr_vsc;
if (!dp->psr_enable)
return 0;
@@ -123,8 +122,8 @@ int analogix_dp_enable_psr(struct analogix_dp_device *dp)
psr_vsc.sdp_header.HB2 = 0x2;
psr_vsc.sdp_header.HB3 = 0x8;
- psr_vsc.DB0 = 0;
- psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
+ psr_vsc.db[0] = 0;
+ psr_vsc.db[1] = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
}
@@ -132,7 +131,7 @@ EXPORT_SYMBOL_GPL(analogix_dp_enable_psr);
int analogix_dp_disable_psr(struct analogix_dp_device *dp)
{
- struct edp_vsc_psr psr_vsc;
+ struct dp_sdp psr_vsc;
int ret;
if (!dp->psr_enable)
@@ -145,8 +144,8 @@ int analogix_dp_disable_psr(struct analogix_dp_device *dp)
psr_vsc.sdp_header.HB2 = 0x2;
psr_vsc.sdp_header.HB3 = 0x8;
- psr_vsc.DB0 = 0;
- psr_vsc.DB1 = 0;
+ psr_vsc.db[0] = 0;
+ psr_vsc.db[1] = 0;
ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
if (ret != 1) {
@@ -1407,8 +1406,6 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
video->color_space = COLOR_YCBCR444;
else if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
video->color_space = COLOR_YCBCR422;
- else if (display_info->color_formats & DRM_COLOR_FORMAT_RGB444)
- video->color_space = COLOR_RGB;
else
video->color_space = COLOR_RGB;
@@ -1581,12 +1578,18 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
- dp->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpios", 0);
- if (!gpio_is_valid(dp->hpd_gpio))
- dp->hpd_gpio = of_get_named_gpio(dev->of_node,
- "samsung,hpd-gpio", 0);
+ /* Try two different names */
+ dp->hpd_gpiod = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (!dp->hpd_gpiod)
+ dp->hpd_gpiod = devm_gpiod_get_optional(dev, "samsung,hpd",
+ GPIOD_IN);
+ if (IS_ERR(dp->hpd_gpiod)) {
+ dev_err(dev, "error getting HPD GPIO: %ld\n",
+ PTR_ERR(dp->hpd_gpiod));
+ return ERR_CAST(dp->hpd_gpiod);
+ }
- if (gpio_is_valid(dp->hpd_gpio)) {
+ if (dp->hpd_gpiod) {
/*
* Set up the hotplug GPIO from the device tree as an interrupt.
* Simply specifying a different interrupt in the device tree
@@ -1594,16 +1597,9 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
* using a GPIO. We also need the actual GPIO specifier so
* that we can get the current state of the GPIO.
*/
- ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN,
- "hpd_gpio");
- if (ret) {
- dev_err(&pdev->dev, "failed to get hpd gpio\n");
- return ERR_PTR(ret);
- }
- dp->irq = gpio_to_irq(dp->hpd_gpio);
+ dp->irq = gpiod_to_irq(dp->hpd_gpiod);
irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
} else {
- dp->hpd_gpio = -ENODEV;
dp->irq = platform_get_irq(pdev, 0);
irq_flags = 0;
}
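
The PSR hunks above swap the driver-private struct edp_vsc_psr for the
generic struct dp_sdp, whose payload is an indexed db[] array rather than
named DB0/DB1 fields. A sketch of the packet setup common to the enable and
disable paths, using only the header bytes visible in the hunks:

/* Sketch: one helper covering both PSR enable and disable packets. */
static void fill_psr_vsc(struct dp_sdp *psr_vsc, bool enable)
{
	memset(psr_vsc, 0, sizeof(*psr_vsc));
	psr_vsc->sdp_header.HB2 = 0x2;	/* as in the hunks above */
	psr_vsc->sdp_header.HB3 = 0x8;
	psr_vsc->db[0] = 0;
	psr_vsc->db[1] = enable ? (EDP_VSC_PSR_STATE_ACTIVE |
				   EDP_VSC_PSR_CRC_VALUES_VALID)
				: 0;
}
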
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 8d82f2555880..da058252dcaf 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -34,6 +34,8 @@
#define DPCD_VOLTAGE_SWING_SET(x) (((x) & 0x3) << 0)
#define DPCD_VOLTAGE_SWING_GET(x) (((x) >> 0) & 0x3)
+struct gpio_desc;
+
enum link_lane_count_type {
LANE_COUNT1 = 1,
LANE_COUNT2 = 2,
@@ -167,7 +169,7 @@ struct analogix_dp_device {
struct link_train link_train;
struct phy *phy;
int dpms_mode;
- int hpd_gpio;
+ struct gpio_desc *hpd_gpiod;
bool force_hpd;
bool psr_enable;
bool fast_train_enable;
@@ -250,7 +252,7 @@ void analogix_dp_enable_scrambling(struct analogix_dp_device *dp);
void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp);
int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
- struct edp_vsc_psr *vsc, bool blocking);
+ struct dp_sdp *vsc, bool blocking);
ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
struct drm_dp_aux_msg *msg);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 7ae311aa13a5..914c569ab8c1 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -8,7 +8,7 @@
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -393,7 +393,7 @@ void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp)
{
u32 reg;
- if (gpio_is_valid(dp->hpd_gpio))
+ if (dp->hpd_gpiod)
return;
reg = HOTPLUG_CHG | HPD_LOST | PLUG;
@@ -407,7 +407,7 @@ void analogix_dp_init_hpd(struct analogix_dp_device *dp)
{
u32 reg;
- if (gpio_is_valid(dp->hpd_gpio))
+ if (dp->hpd_gpiod)
return;
analogix_dp_clear_hotplug_interrupts(dp);
@@ -430,8 +430,8 @@ enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp)
{
u32 reg;
- if (gpio_is_valid(dp->hpd_gpio)) {
- reg = gpio_get_value(dp->hpd_gpio);
+ if (dp->hpd_gpiod) {
+ reg = gpiod_get_value(dp->hpd_gpiod);
if (reg)
return DP_IRQ_TYPE_HP_CABLE_IN;
else
@@ -503,8 +503,8 @@ int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp)
{
u32 reg;
- if (gpio_is_valid(dp->hpd_gpio)) {
- if (gpio_get_value(dp->hpd_gpio))
+ if (dp->hpd_gpiod) {
+ if (gpiod_get_value(dp->hpd_gpiod))
return 0;
} else {
reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
@@ -1037,7 +1037,7 @@ static ssize_t analogix_dp_get_psr_status(struct analogix_dp_device *dp)
}
int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
- struct edp_vsc_psr *vsc, bool blocking)
+ struct dp_sdp *vsc, bool blocking)
{
unsigned int val;
int ret;
@@ -1065,8 +1065,8 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3);
/* configure DB0 / DB1 values */
- writel(vsc->DB0, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0);
- writel(vsc->DB1, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1);
+ writel(vsc->db[0], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0);
+ writel(vsc->db[1], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1);
/* set reuse spd infoframe */
val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
@@ -1088,8 +1088,8 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status,
psr_status >= 0 &&
- ((vsc->DB1 && psr_status == DP_PSR_SINK_ACTIVE_RFB) ||
- (!vsc->DB1 && psr_status == DP_PSR_SINK_INACTIVE)), 1500,
+ ((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) ||
+ (!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500,
DP_TIMEOUT_PSR_LOOP_MS * 1000);
if (ret) {
dev_warn(dp->dev, "Failed to apply PSR %d\n", ret);
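
Across analogix_dp_core.c and analogix_dp_reg.c the legacy integer GPIO API
is replaced by gpiod descriptors: devm_gpiod_get_optional() returns NULL when
the property is absent and an ERR_PTR on real errors, so every
gpio_is_valid() test collapses to a plain pointer check. A sketch of the
lookup-with-fallback and IRQ mapping, assuming the same two property names:

/* Sketch: optional HPD GPIO with a legacy fallback name. */
static int get_hpd_irq(struct device *dev, struct gpio_desc **gpiod)
{
	*gpiod = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
	if (!*gpiod)	/* fall back to the older binding */
		*gpiod = devm_gpiod_get_optional(dev, "samsung,hpd",
						 GPIOD_IN);
	if (IS_ERR(*gpiod))
		return PTR_ERR(*gpiod);
	if (!*gpiod)
		return -ENODEV;	/* caller falls back to platform_get_irq() */

	/* Line state is later read with gpiod_get_value(*gpiod). */
	return gpiod_to_irq(*gpiod);
}
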
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 2a4ff77c18de..d32885b906ae 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -11,9 +11,9 @@
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
struct dumb_vga {
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 27e55c32f823..2ab2c234f26c 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -3,12 +3,14 @@
* Copyright (C) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_panel.h>
-
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_panel.h>
struct lvds_encoder {
struct drm_bridge bridge;
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 823db80cbd19..79311f8354bd 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -23,11 +23,12 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
#define EDID_EXT_BLOCK_CNT 0x7E
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 9fd231c5887f..98bc650b8c95 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -12,13 +12,14 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
#define PTN3460_EDID_ADDR 0x0
#define PTN3460_EDID_EMULATION_ADDR 0x84
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 04a513319c8f..b12ae3a4c5f1 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -4,14 +4,13 @@
* Copyright (C) 2017 Broadcom
*/
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_probe_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
struct panel_bridge {
struct drm_bridge bridge;
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 699c8dfb0fcb..2d88146e4836 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -16,12 +16,13 @@
#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
/* Brightness scale on the Parade chip */
#define PS8622_MAX_BRIGHTNESS 0xff
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 1211b5379df1..dd7aa466b280 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -17,12 +17,16 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/clk.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <sound/hdmi-codec.h>
+
#define SII902X_TPI_VIDEO_DATA 0x0
#define SII902X_TPI_PIXEL_REPETITION 0x8
@@ -64,6 +68,77 @@
#define SII902X_AVI_POWER_STATE_MSK GENMASK(1, 0)
#define SII902X_AVI_POWER_STATE_D(l) ((l) & SII902X_AVI_POWER_STATE_MSK)
+/* Audio */
+#define SII902X_TPI_I2S_ENABLE_MAPPING_REG 0x1f
+#define SII902X_TPI_I2S_CONFIG_FIFO0 (0 << 0)
+#define SII902X_TPI_I2S_CONFIG_FIFO1 (1 << 0)
+#define SII902X_TPI_I2S_CONFIG_FIFO2 (2 << 0)
+#define SII902X_TPI_I2S_CONFIG_FIFO3 (3 << 0)
+#define SII902X_TPI_I2S_LEFT_RIGHT_SWAP (1 << 2)
+#define SII902X_TPI_I2S_AUTO_DOWNSAMPLE (1 << 3)
+#define SII902X_TPI_I2S_SELECT_SD0 (0 << 4)
+#define SII902X_TPI_I2S_SELECT_SD1 (1 << 4)
+#define SII902X_TPI_I2S_SELECT_SD2 (2 << 4)
+#define SII902X_TPI_I2S_SELECT_SD3 (3 << 4)
+#define SII902X_TPI_I2S_FIFO_ENABLE (1 << 7)
+
+#define SII902X_TPI_I2S_INPUT_CONFIG_REG 0x20
+#define SII902X_TPI_I2S_FIRST_BIT_SHIFT_YES (0 << 0)
+#define SII902X_TPI_I2S_FIRST_BIT_SHIFT_NO (1 << 0)
+#define SII902X_TPI_I2S_SD_DIRECTION_MSB_FIRST (0 << 1)
+#define SII902X_TPI_I2S_SD_DIRECTION_LSB_FIRST (1 << 1)
+#define SII902X_TPI_I2S_SD_JUSTIFY_LEFT (0 << 2)
+#define SII902X_TPI_I2S_SD_JUSTIFY_RIGHT (1 << 2)
+#define SII902X_TPI_I2S_WS_POLARITY_LOW (0 << 3)
+#define SII902X_TPI_I2S_WS_POLARITY_HIGH (1 << 3)
+#define SII902X_TPI_I2S_MCLK_MULTIPLIER_128 (0 << 4)
+#define SII902X_TPI_I2S_MCLK_MULTIPLIER_256 (1 << 4)
+#define SII902X_TPI_I2S_MCLK_MULTIPLIER_384 (2 << 4)
+#define SII902X_TPI_I2S_MCLK_MULTIPLIER_512 (3 << 4)
+#define SII902X_TPI_I2S_MCLK_MULTIPLIER_768 (4 << 4)
+#define SII902X_TPI_I2S_MCLK_MULTIPLIER_1024 (5 << 4)
+#define SII902X_TPI_I2S_MCLK_MULTIPLIER_1152 (6 << 4)
+#define SII902X_TPI_I2S_MCLK_MULTIPLIER_192 (7 << 4)
+#define SII902X_TPI_I2S_SCK_EDGE_FALLING (0 << 7)
+#define SII902X_TPI_I2S_SCK_EDGE_RISING (1 << 7)
+
+#define SII902X_TPI_I2S_STRM_HDR_BASE 0x21
+#define SII902X_TPI_I2S_STRM_HDR_SIZE 5
+
+#define SII902X_TPI_AUDIO_CONFIG_BYTE2_REG 0x26
+#define SII902X_TPI_AUDIO_CODING_STREAM_HEADER (0 << 0)
+#define SII902X_TPI_AUDIO_CODING_PCM (1 << 0)
+#define SII902X_TPI_AUDIO_CODING_AC3 (2 << 0)
+#define SII902X_TPI_AUDIO_CODING_MPEG1 (3 << 0)
+#define SII902X_TPI_AUDIO_CODING_MP3 (4 << 0)
+#define SII902X_TPI_AUDIO_CODING_MPEG2 (5 << 0)
+#define SII902X_TPI_AUDIO_CODING_AAC (6 << 0)
+#define SII902X_TPI_AUDIO_CODING_DTS (7 << 0)
+#define SII902X_TPI_AUDIO_CODING_ATRAC (8 << 0)
+#define SII902X_TPI_AUDIO_MUTE_DISABLE (0 << 4)
+#define SII902X_TPI_AUDIO_MUTE_ENABLE (1 << 4)
+#define SII902X_TPI_AUDIO_LAYOUT_2_CHANNELS (0 << 5)
+#define SII902X_TPI_AUDIO_LAYOUT_8_CHANNELS (1 << 5)
+#define SII902X_TPI_AUDIO_INTERFACE_DISABLE (0 << 6)
+#define SII902X_TPI_AUDIO_INTERFACE_SPDIF (1 << 6)
+#define SII902X_TPI_AUDIO_INTERFACE_I2S (2 << 6)
+
+#define SII902X_TPI_AUDIO_CONFIG_BYTE3_REG 0x27
+#define SII902X_TPI_AUDIO_FREQ_STREAM (0 << 3)
+#define SII902X_TPI_AUDIO_FREQ_32KHZ (1 << 3)
+#define SII902X_TPI_AUDIO_FREQ_44KHZ (2 << 3)
+#define SII902X_TPI_AUDIO_FREQ_48KHZ (3 << 3)
+#define SII902X_TPI_AUDIO_FREQ_88KHZ (4 << 3)
+#define SII902X_TPI_AUDIO_FREQ_96KHZ (5 << 3)
+#define SII902X_TPI_AUDIO_FREQ_176KHZ (6 << 3)
+#define SII902X_TPI_AUDIO_FREQ_192KHZ (7 << 3)
+#define SII902X_TPI_AUDIO_SAMPLE_SIZE_STREAM (0 << 6)
+#define SII902X_TPI_AUDIO_SAMPLE_SIZE_16 (1 << 6)
+#define SII902X_TPI_AUDIO_SAMPLE_SIZE_20 (2 << 6)
+#define SII902X_TPI_AUDIO_SAMPLE_SIZE_24 (3 << 6)
+
+#define SII902X_TPI_AUDIO_CONFIG_BYTE4_REG 0x28
+
#define SII902X_INT_ENABLE 0x3c
#define SII902X_INT_STATUS 0x3d
#define SII902X_HOTPLUG_EVENT BIT(0)
@@ -71,6 +146,16 @@
#define SII902X_REG_TPI_RQB 0xc7
+/* Indirect internal register access */
+#define SII902X_IND_SET_PAGE 0xbc
+#define SII902X_IND_OFFSET 0xbd
+#define SII902X_IND_VALUE 0xbe
+
+#define SII902X_TPI_MISC_INFOFRAME_BASE 0xbf
+#define SII902X_TPI_MISC_INFOFRAME_END 0xde
+#define SII902X_TPI_MISC_INFOFRAME_SIZE \
+ (SII902X_TPI_MISC_INFOFRAME_END - SII902X_TPI_MISC_INFOFRAME_BASE)
+
#define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500
struct sii902x {
@@ -80,6 +165,16 @@ struct sii902x {
struct drm_connector connector;
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
+ /*
+ * Mutex protects audio and video functions from interfering with
+ * each other by keeping their i2c command sequences atomic.
+ */
+ struct mutex mutex;
+ struct sii902x_audio {
+ struct platform_device *pdev;
+ struct clk *mclk;
+ u32 i2s_fifo_sequence[4];
+ } audio;
};
static int sii902x_read_unlocked(struct i2c_client *i2c, u8 reg, u8 *val)
@@ -151,8 +246,12 @@ sii902x_connector_detect(struct drm_connector *connector, bool force)
struct sii902x *sii902x = connector_to_sii902x(connector);
unsigned int status;
+ mutex_lock(&sii902x->mutex);
+
regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+ mutex_unlock(&sii902x->mutex);
+
return (status & SII902X_PLUGGED_STATUS) ?
connector_status_connected : connector_status_disconnected;
}
@@ -170,12 +269,18 @@ static int sii902x_get_modes(struct drm_connector *connector)
{
struct sii902x *sii902x = connector_to_sii902x(connector);
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI;
struct edid *edid;
int num = 0, ret;
+ mutex_lock(&sii902x->mutex);
+
edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);
drm_connector_update_edid_property(connector, edid);
if (edid) {
+ if (drm_detect_hdmi_monitor(edid))
+ output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI;
+
num = drm_add_edid_modes(connector, edid);
kfree(edid);
}
@@ -183,9 +288,19 @@ static int sii902x_get_modes(struct drm_connector *connector)
ret = drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
if (ret)
- return ret;
+ goto error_out;
+
+ ret = regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_OUTPUT_MODE, output_mode);
+ if (ret)
+ goto error_out;
- return num;
+ ret = num;
+
+error_out:
+ mutex_unlock(&sii902x->mutex);
+
+ return ret;
}
static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector,
@@ -205,20 +320,28 @@ static void sii902x_bridge_disable(struct drm_bridge *bridge)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ mutex_lock(&sii902x->mutex);
+
regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
SII902X_SYS_CTRL_PWR_DWN,
SII902X_SYS_CTRL_PWR_DWN);
+
+ mutex_unlock(&sii902x->mutex);
}
static void sii902x_bridge_enable(struct drm_bridge *bridge)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ mutex_lock(&sii902x->mutex);
+
regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL,
SII902X_AVI_POWER_STATE_MSK,
SII902X_AVI_POWER_STATE_D(0));
regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
SII902X_SYS_CTRL_PWR_DWN, 0);
+
+ mutex_unlock(&sii902x->mutex);
}
static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
@@ -229,10 +352,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
struct regmap *regmap = sii902x->regmap;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
struct hdmi_avi_infoframe frame;
+ u16 pixel_clock_10kHz = adj->clock / 10;
int ret;
- buf[0] = adj->clock;
- buf[1] = adj->clock >> 8;
+ buf[0] = pixel_clock_10kHz & 0xff;
+ buf[1] = pixel_clock_10kHz >> 8;
buf[2] = adj->vrefresh;
buf[3] = 0x00;
buf[4] = adj->hdisplay;
@@ -244,27 +368,32 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
buf[9] = SII902X_TPI_AVI_INPUT_RANGE_AUTO |
SII902X_TPI_AVI_INPUT_COLORSPACE_RGB;
+ mutex_lock(&sii902x->mutex);
+
ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10);
if (ret)
- return;
+ goto out;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame,
&sii902x->connector, adj);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
- return;
+ goto out;
}
ret = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
- return;
+ goto out;
}
/* Do not send the infoframe header, but keep the CRC field. */
regmap_bulk_write(regmap, SII902X_TPI_AVI_INFOFRAME,
buf + HDMI_INFOFRAME_HEADER_SIZE - 1,
HDMI_AVI_INFOFRAME_SIZE + 1);
+
+out:
+ mutex_unlock(&sii902x->mutex);
}
static int sii902x_bridge_attach(struct drm_bridge *bridge)
@@ -305,6 +434,335 @@ static const struct drm_bridge_funcs sii902x_bridge_funcs = {
.enable = sii902x_bridge_enable,
};
+static int sii902x_mute(struct sii902x *sii902x, bool mute)
+{
+ struct device *dev = &sii902x->i2c->dev;
+ unsigned int val = mute ? SII902X_TPI_AUDIO_MUTE_ENABLE :
+ SII902X_TPI_AUDIO_MUTE_DISABLE;
+
+ dev_dbg(dev, "%s: %s\n", __func__, mute ? "Muted" : "Unmuted");
+
+ return regmap_update_bits(sii902x->regmap,
+ SII902X_TPI_AUDIO_CONFIG_BYTE2_REG,
+ SII902X_TPI_AUDIO_MUTE_ENABLE, val);
+}
+
+static const int sii902x_mclk_div_table[] = {
+ 128, 256, 384, 512, 768, 1024, 1152, 192 };
+
+static int sii902x_select_mclk_div(u8 *i2s_config_reg, unsigned int rate,
+ unsigned int mclk)
+{
+ int div = mclk / rate;
+ int distance = 100000;
+ u8 i, nearest = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sii902x_mclk_div_table); i++) {
+ unsigned int d = abs(div - sii902x_mclk_div_table[i]);
+
+ if (d >= distance)
+ continue;
+
+ nearest = i;
+ distance = d;
+ if (d == 0)
+ break;
+ }
+
+ *i2s_config_reg |= nearest << 4;
+
+ return sii902x_mclk_div_table[nearest];
+}
+
+static const struct sii902x_sample_freq {
+ u32 freq;
+ u8 val;
+} sii902x_sample_freq[] = {
+ { .freq = 32000, .val = SII902X_TPI_AUDIO_FREQ_32KHZ },
+ { .freq = 44000, .val = SII902X_TPI_AUDIO_FREQ_44KHZ },
+ { .freq = 48000, .val = SII902X_TPI_AUDIO_FREQ_48KHZ },
+ { .freq = 88000, .val = SII902X_TPI_AUDIO_FREQ_88KHZ },
+ { .freq = 96000, .val = SII902X_TPI_AUDIO_FREQ_96KHZ },
+ { .freq = 176000, .val = SII902X_TPI_AUDIO_FREQ_176KHZ },
+ { .freq = 192000, .val = SII902X_TPI_AUDIO_FREQ_192KHZ },
+};
+
+static int sii902x_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct sii902x *sii902x = dev_get_drvdata(dev);
+ u8 i2s_config_reg = SII902X_TPI_I2S_SD_DIRECTION_MSB_FIRST;
+ u8 config_byte2_reg = (SII902X_TPI_AUDIO_INTERFACE_I2S |
+ SII902X_TPI_AUDIO_MUTE_ENABLE |
+ SII902X_TPI_AUDIO_CODING_PCM);
+ u8 config_byte3_reg = 0;
+ u8 infoframe_buf[HDMI_INFOFRAME_SIZE(AUDIO)];
+ unsigned long mclk_rate;
+ int i, ret;
+
+ if (daifmt->bit_clk_master || daifmt->frame_clk_master) {
+ dev_dbg(dev, "%s: I2S master mode not supported\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (daifmt->fmt) {
+ case HDMI_I2S:
+ i2s_config_reg |= SII902X_TPI_I2S_FIRST_BIT_SHIFT_YES |
+ SII902X_TPI_I2S_SD_JUSTIFY_LEFT;
+ break;
+ case HDMI_RIGHT_J:
+ i2s_config_reg |= SII902X_TPI_I2S_SD_JUSTIFY_RIGHT;
+ break;
+ case HDMI_LEFT_J:
+ i2s_config_reg |= SII902X_TPI_I2S_SD_JUSTIFY_LEFT;
+ break;
+ default:
+ dev_dbg(dev, "%s: Unsupported i2s format %u\n", __func__,
+ daifmt->fmt);
+ return -EINVAL;
+ }
+
+ if (daifmt->bit_clk_inv)
+ i2s_config_reg |= SII902X_TPI_I2S_SCK_EDGE_FALLING;
+ else
+ i2s_config_reg |= SII902X_TPI_I2S_SCK_EDGE_RISING;
+
+ if (daifmt->frame_clk_inv)
+ i2s_config_reg |= SII902X_TPI_I2S_WS_POLARITY_LOW;
+ else
+ i2s_config_reg |= SII902X_TPI_I2S_WS_POLARITY_HIGH;
+
+ if (params->channels > 2)
+ config_byte2_reg |= SII902X_TPI_AUDIO_LAYOUT_8_CHANNELS;
+ else
+ config_byte2_reg |= SII902X_TPI_AUDIO_LAYOUT_2_CHANNELS;
+
+ switch (params->sample_width) {
+ case 16:
+ config_byte3_reg |= SII902X_TPI_AUDIO_SAMPLE_SIZE_16;
+ break;
+ case 20:
+ config_byte3_reg |= SII902X_TPI_AUDIO_SAMPLE_SIZE_20;
+ break;
+ case 24:
+ case 32:
+ config_byte3_reg |= SII902X_TPI_AUDIO_SAMPLE_SIZE_24;
+ break;
+ default:
+ dev_err(dev, "%s: Unsupported sample width %u\n", __func__,
+ params->sample_width);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sii902x_sample_freq); i++) {
+ if (params->sample_rate == sii902x_sample_freq[i].freq) {
+ config_byte3_reg |= sii902x_sample_freq[i].val;
+ break;
+ }
+ }
+
+ ret = clk_prepare_enable(sii902x->audio.mclk);
+ if (ret) {
+ dev_err(dev, "Enabling mclk failed: %d\n", ret);
+ return ret;
+ }
+
+ mclk_rate = clk_get_rate(sii902x->audio.mclk);
+
+ ret = sii902x_select_mclk_div(&i2s_config_reg, params->sample_rate,
+ mclk_rate);
+ if (mclk_rate != ret * params->sample_rate)
+ dev_dbg(dev, "Inaccurate reference clock (%ld/%d != %u)\n",
+ mclk_rate, ret, params->sample_rate);
+
+ mutex_lock(&sii902x->mutex);
+
+ ret = regmap_write(sii902x->regmap,
+ SII902X_TPI_AUDIO_CONFIG_BYTE2_REG,
+ config_byte2_reg);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_write(sii902x->regmap, SII902X_TPI_I2S_INPUT_CONFIG_REG,
+ i2s_config_reg);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(sii902x->audio.i2s_fifo_sequence) &&
+ sii902x->audio.i2s_fifo_sequence[i]; i++)
+ regmap_write(sii902x->regmap,
+ SII902X_TPI_I2S_ENABLE_MAPPING_REG,
+ sii902x->audio.i2s_fifo_sequence[i]);
+
+ ret = regmap_write(sii902x->regmap, SII902X_TPI_AUDIO_CONFIG_BYTE3_REG,
+ config_byte3_reg);
+ if (ret)
+ goto out;
+
+ ret = regmap_bulk_write(sii902x->regmap, SII902X_TPI_I2S_STRM_HDR_BASE,
+ params->iec.status,
+ min((size_t) SII902X_TPI_I2S_STRM_HDR_SIZE,
+ sizeof(params->iec.status)));
+ if (ret)
+ goto out;
+
+ ret = hdmi_audio_infoframe_pack(&params->cea, infoframe_buf,
+ sizeof(infoframe_buf));
+ if (ret < 0) {
+ dev_err(dev, "%s: Failed to pack audio infoframe: %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = regmap_bulk_write(sii902x->regmap,
+ SII902X_TPI_MISC_INFOFRAME_BASE,
+ infoframe_buf,
+ min(ret, SII902X_TPI_MISC_INFOFRAME_SIZE));
+ if (ret)
+ goto out;
+
+ /* Decode Level 0 Packets */
+ ret = regmap_write(sii902x->regmap, SII902X_IND_SET_PAGE, 0x02);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(sii902x->regmap, SII902X_IND_OFFSET, 0x24);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(sii902x->regmap, SII902X_IND_VALUE, 0x02);
+ if (ret)
+ goto out;
+
+ dev_dbg(dev, "%s: hdmi audio enabled\n", __func__);
+out:
+ mutex_unlock(&sii902x->mutex);
+
+ if (ret) {
+ clk_disable_unprepare(sii902x->audio.mclk);
+ dev_err(dev, "%s: hdmi audio enable failed: %d\n", __func__,
+ ret);
+ }
+
+ return ret;
+}
+
+static void sii902x_audio_shutdown(struct device *dev, void *data)
+{
+ struct sii902x *sii902x = dev_get_drvdata(dev);
+
+ mutex_lock(&sii902x->mutex);
+
+ regmap_write(sii902x->regmap, SII902X_TPI_AUDIO_CONFIG_BYTE2_REG,
+ SII902X_TPI_AUDIO_INTERFACE_DISABLE);
+
+ mutex_unlock(&sii902x->mutex);
+
+ clk_disable_unprepare(sii902x->audio.mclk);
+}
+
+int sii902x_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+ struct sii902x *sii902x = dev_get_drvdata(dev);
+
+ mutex_lock(&sii902x->mutex);
+
+ sii902x_mute(sii902x, enable);
+
+ mutex_unlock(&sii902x->mutex);
+
+ return 0;
+}
+
+static int sii902x_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct sii902x *sii902x = dev_get_drvdata(dev);
+
+ mutex_lock(&sii902x->mutex);
+
+ memcpy(buf, sii902x->connector.eld,
+ min(sizeof(sii902x->connector.eld), len));
+
+ mutex_unlock(&sii902x->mutex);
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops sii902x_audio_codec_ops = {
+ .hw_params = sii902x_audio_hw_params,
+ .audio_shutdown = sii902x_audio_shutdown,
+ .digital_mute = sii902x_audio_digital_mute,
+ .get_eld = sii902x_audio_get_eld,
+};
+
+static int sii902x_audio_codec_init(struct sii902x *sii902x,
+ struct device *dev)
+{
+ static const u8 audio_fifo_id[] = {
+ SII902X_TPI_I2S_CONFIG_FIFO0,
+ SII902X_TPI_I2S_CONFIG_FIFO1,
+ SII902X_TPI_I2S_CONFIG_FIFO2,
+ SII902X_TPI_I2S_CONFIG_FIFO3,
+ };
+ static const u8 i2s_lane_id[] = {
+ SII902X_TPI_I2S_SELECT_SD0,
+ SII902X_TPI_I2S_SELECT_SD1,
+ SII902X_TPI_I2S_SELECT_SD2,
+ SII902X_TPI_I2S_SELECT_SD3,
+ };
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &sii902x_audio_codec_ops,
+ .i2s = 1, /* Only i2s support for now. */
+ .spdif = 0,
+ .max_i2s_channels = 0,
+ };
+ u8 lanes[4];
+ int num_lanes, i;
+
+ if (!of_property_read_bool(dev->of_node, "#sound-dai-cells")) {
+ dev_dbg(dev, "%s: No \"#sound-dai-cells\", no audio\n",
+ __func__);
+ return 0;
+ }
+
+ num_lanes = of_property_read_variable_u8_array(dev->of_node,
+ "sil,i2s-data-lanes",
+ lanes, 1,
+ ARRAY_SIZE(lanes));
+
+ if (num_lanes == -EINVAL) {
+ dev_dbg(dev,
+ "%s: No \"sil,i2s-data-lanes\", use default <0>\n",
+ __func__);
+ num_lanes = 1;
+ lanes[0] = 0;
+ } else if (num_lanes < 0) {
+ dev_err(dev,
+ "%s: Error gettin \"sil,i2s-data-lanes\": %d\n",
+ __func__, num_lanes);
+ return num_lanes;
+ }
+ codec_data.max_i2s_channels = 2 * num_lanes;
+
+ for (i = 0; i < num_lanes; i++)
+ sii902x->audio.i2s_fifo_sequence[i] |= audio_fifo_id[i] |
+ i2s_lane_id[lanes[i]] | SII902X_TPI_I2S_FIFO_ENABLE;
+
+ if (IS_ERR(sii902x->audio.mclk)) {
+ dev_err(dev, "%s: No clock (audio mclk) found: %ld\n",
+ __func__, PTR_ERR(sii902x->audio.mclk));
+ return 0;
+ }
+
+ sii902x->audio.pdev = platform_device_register_data(
+ dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(sii902x->audio.pdev);
+}
+
static const struct regmap_range sii902x_volatile_ranges[] = {
{ .range_min = 0, .range_max = 0xff },
};
@@ -317,6 +775,8 @@ static const struct regmap_access_table sii902x_volatile_table = {
static const struct regmap_config sii902x_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+ .disable_locking = true, /* struct sii902x mutex should be enough */
+ .max_register = SII902X_TPI_MISC_INFOFRAME_END,
.volatile_table = &sii902x_volatile_table,
.cache_type = REGCACHE_NONE,
};
@@ -326,9 +786,13 @@ static irqreturn_t sii902x_interrupt(int irq, void *data)
struct sii902x *sii902x = data;
unsigned int status = 0;
+ mutex_lock(&sii902x->mutex);
+
regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
+ mutex_unlock(&sii902x->mutex);
+
if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev)
drm_helper_hpd_irq_event(sii902x->bridge.dev);
@@ -450,6 +914,12 @@ static int sii902x_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
return 0;
}
+static const struct drm_bridge_timings default_sii902x_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
static int sii902x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -483,6 +953,8 @@ static int sii902x_probe(struct i2c_client *client,
return PTR_ERR(sii902x->reset_gpio);
}
+ mutex_init(&sii902x->mutex);
+
sii902x_reset(sii902x);
ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
@@ -520,8 +992,11 @@ static int sii902x_probe(struct i2c_client *client,
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
+ sii902x->bridge.timings = &default_sii902x_timings;
drm_bridge_add(&sii902x->bridge);
+ sii902x_audio_codec_init(sii902x, dev);
+
i2c_set_clientdata(client, sii902x);
sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev,
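
Among the sii902x audio additions, sii902x_select_mclk_div() does a
nearest-match search over the part's eight fixed MCLK/sample-rate ratios and
encodes the winner into bits [6:4] of the I2S input config register. A small
usage sketch; the rates are illustrative, not from the patch:

/* Sketch: with mclk = 12.288 MHz and rate = 48 kHz the exact ratio 256
 * is in the table, so SII902X_TPI_I2S_MCLK_MULTIPLIER_256 is encoded.
 * On a mismatch the driver merely dev_dbg()s and carries on.
 */
static bool mclk_is_exact(unsigned long mclk_rate, unsigned int rate)
{
	u8 cfg = SII902X_TPI_I2S_SD_DIRECTION_MSB_FIRST;
	int div = sii902x_select_mclk_div(&cfg, rate, mclk_rate);

	return mclk_rate == (unsigned long)div * rate;
}
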
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index b36bbafb0e43..25d4ad8c7ad6 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -815,7 +815,7 @@ static irqreturn_t sii9234_irq_thread(int irq, void *data)
static int sii9234_init_resources(struct sii9234 *ctx,
struct i2c_client *client)
{
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
int ret;
if (!ctx->dev->of_node) {
@@ -897,7 +897,7 @@ static const struct drm_bridge_funcs sii9234_bridge_funcs = {
static int sii9234_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct i2c_adapter *adapter = client->adapter;
struct sii9234 *ctx;
struct device *dev = &client->dev;
int ret;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 045b1b13fd0e..c6490949d9db 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -6,34 +6,36 @@
* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
* Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*/
-#include <linux/module.h>
-#include <linux/irq.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/hdmi.h>
+#include <linux/irq.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
+#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
-#include <drm/drm_of.h>
-#include <drm/drmP.h>
+#include <media/cec-notifier.h>
+
+#include <uapi/linux/media-bus-format.h>
+#include <uapi/linux/videodev2.h>
+
+#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
-#include <drm/drm_scdc_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/bridge/dw_hdmi.h>
-
-#include <uapi/linux/media-bus-format.h>
-#include <uapi/linux/videodev2.h>
+#include <drm/drm_scdc_helper.h>
-#include "dw-hdmi.h"
#include "dw-hdmi-audio.h"
#include "dw-hdmi-cec.h"
-
-#include <media/cec-notifier.h>
+#include "dw-hdmi.h"
#define DDC_SEGMENT_ADDR 0x30
@@ -164,6 +166,10 @@ struct dw_hdmi {
bool sink_is_hdmi;
bool sink_has_audio;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *default_state;
+ struct pinctrl_state *unwedge_state;
+
struct mutex mutex; /* for state below and previous_mode */
enum drm_connector_force force; /* mutex-protected force state */
bool disabled; /* DRM has disabled our bridge */
@@ -222,6 +228,13 @@ static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg,
static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi)
{
+ hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
+ HDMI_PHY_I2CM_INT_ADDR);
+
+ hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
+ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
+ HDMI_PHY_I2CM_CTLINT_ADDR);
+
/* Software reset */
hdmi_writeb(hdmi, 0x00, HDMI_I2CM_SOFTRSTZ);
@@ -242,11 +255,82 @@ static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi)
HDMI_IH_MUTE_I2CM_STAT0);
}
+static bool dw_hdmi_i2c_unwedge(struct dw_hdmi *hdmi)
+{
+ /* If no unwedge state then give up */
+ if (!hdmi->unwedge_state)
+ return false;
+
+ dev_info(hdmi->dev, "Attempting to unwedge stuck i2c bus\n");
+
+ /*
+ * This is a huge hack to work around a problem where the dw_hdmi i2c
+ * bus could sometimes get wedged. Once wedged there doesn't appear
+ * to be any way to unwedge it (including the HDMI_I2CM_SOFTRSTZ)
+ * other than pulsing the SDA line.
+ *
+ * We appear to be able to pulse the SDA line (in the eyes of dw_hdmi)
+ * by:
+ * 1. Remux the pin as a GPIO output, driven low.
+ * 2. Wait a little while. 1 ms seems to work, but we'll do 10.
+ * 3. Immediately jump to remux the pin as dw_hdmi i2c again.
+ *
+ * At the moment of remuxing, the line will still be low due to its
+ * recent stint as an output, but then it will be pulled high by the
+ * (presumed) external pullup. dw_hdmi seems to see this as a rising
+ * edge and that seems to get it out of its jam.
+ *
+ * This wedging was only ever seen on one TV, and only on one of
+ * its HDMI ports. It happened when the TV was powered on while the
+ * device was plugged in. A scope trace shows the TV bringing both SDA
+ * and SCL low, then bringing them both back up at roughly the same
+ * time. Presumably this confuses dw_hdmi because it saw activity but
+ * no real STOP (maybe it thinks there's another master on the bus?).
+ * Giving it a clean rising edge of SDA while SCL is already high
+ * presumably makes dw_hdmi see a STOP which seems to bring dw_hdmi out
+ * of its stupor.
+ *
+ * Note that after coming back alive, transfers seem to immediately
+ * resume, so if we unwedge due to a timeout we should wait a little
+ * longer for our transfer to finish, since it might have just started
+ * now.
+ */
+ pinctrl_select_state(hdmi->pinctrl, hdmi->unwedge_state);
+ msleep(10);
+ pinctrl_select_state(hdmi->pinctrl, hdmi->default_state);
+
+ return true;
+}
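
The optional "unwedge" pin state used above has to be provided by the board description. A minimal sketch of the lookup/toggle pattern in isolation (the state names match what the probe code below searches for; everything else is illustrative, and error handling is abbreviated):

#include <linux/delay.h>
#include <linux/pinctrl/consumer.h>

static void ddc_unwedge(struct device *dev)
{
	struct pinctrl *p = devm_pinctrl_get(dev);
	struct pinctrl_state *def, *unwedge;

	if (IS_ERR(p))
		return;

	def = pinctrl_lookup_state(p, "default");
	unwedge = pinctrl_lookup_state(p, "unwedge");
	if (IS_ERR(def) || IS_ERR(unwedge))
		return;

	pinctrl_select_state(p, unwedge);	/* SDA remuxed as GPIO, driven low */
	msleep(10);
	pinctrl_select_state(p, def);		/* back to i2c; pullup gives the rising edge */
}
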
+
+static int dw_hdmi_i2c_wait(struct dw_hdmi *hdmi)
+{
+ struct dw_hdmi_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat) {
+ /* If we can't unwedge, return timeout */
+ if (!dw_hdmi_i2c_unwedge(hdmi))
+ return -EAGAIN;
+
+ /* We tried to unwedge; give it another chance */
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat)
+ return -EAGAIN;
+ }
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
static int dw_hdmi_i2c_read(struct dw_hdmi *hdmi,
unsigned char *buf, unsigned int length)
{
struct dw_hdmi_i2c *i2c = hdmi->i2c;
- int stat;
+ int ret;
if (!i2c->is_regaddr) {
dev_dbg(hdmi->dev, "set read register address to 0\n");
@@ -265,13 +349,9 @@ static int dw_hdmi_i2c_read(struct dw_hdmi *hdmi,
hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_READ,
HDMI_I2CM_OPERATION);
- stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
- if (!stat)
- return -EAGAIN;
-
- /* Check for error condition on the bus */
- if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
- return -EIO;
+ ret = dw_hdmi_i2c_wait(hdmi);
+ if (ret)
+ return ret;
*buf++ = hdmi_readb(hdmi, HDMI_I2CM_DATAI);
}
@@ -284,7 +364,7 @@ static int dw_hdmi_i2c_write(struct dw_hdmi *hdmi,
unsigned char *buf, unsigned int length)
{
struct dw_hdmi_i2c *i2c = hdmi->i2c;
- int stat;
+ int ret;
if (!i2c->is_regaddr) {
/* Use the first write byte as register address */
@@ -302,13 +382,9 @@ static int dw_hdmi_i2c_write(struct dw_hdmi *hdmi,
hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_WRITE,
HDMI_I2CM_OPERATION);
- stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
- if (!stat)
- return -EAGAIN;
-
- /* Check for error condition on the bus */
- if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR)
- return -EIO;
+ ret = dw_hdmi_i2c_wait(hdmi);
+ if (ret)
+ return ret;
}
return 0;
@@ -1920,16 +1996,6 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
return 0;
}
-static void dw_hdmi_setup_i2c(struct dw_hdmi *hdmi)
-{
- hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
- HDMI_PHY_I2CM_INT_ADDR);
-
- hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
- HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
- HDMI_PHY_I2CM_CTLINT_ADDR);
-}
-
static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi)
{
u8 ih_mute;
@@ -2430,6 +2496,21 @@ static const struct regmap_config hdmi_regmap_32bit_config = {
.max_register = HDMI_I2CM_FS_SCL_LCNT_0_ADDR << 2,
};
+static void dw_hdmi_init_hw(struct dw_hdmi *hdmi)
+{
+ initialize_hdmi_ih_mutes(hdmi);
+
+ /*
+ * Reset HDMI DDC I2C master controller and mute I2CM interrupts.
+ * Even if we are using a separate i2c adapter, doing this doesn't
+ * hurt.
+ */
+ dw_hdmi_i2c_init(hdmi);
+
+ if (hdmi->phy.ops->setup_hpd)
+ hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data);
+}
+
static struct dw_hdmi *
__dw_hdmi_probe(struct platform_device *pdev,
const struct dw_hdmi_plat_data *plat_data)
@@ -2581,7 +2662,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
prod_id1 & HDMI_PRODUCT_ID1_HDCP ? "with" : "without",
hdmi->phy.name);
- initialize_hdmi_ih_mutes(hdmi);
+ dw_hdmi_init_hw(hdmi);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -2609,6 +2690,24 @@ __dw_hdmi_probe(struct platform_device *pdev,
/* If DDC bus is not specified, try to register HDMI I2C bus */
if (!hdmi->ddc) {
+ /* Look for (optional) stuff related to unwedging */
+ hdmi->pinctrl = devm_pinctrl_get(dev);
+ if (!IS_ERR(hdmi->pinctrl)) {
+ hdmi->unwedge_state =
+ pinctrl_lookup_state(hdmi->pinctrl, "unwedge");
+ hdmi->default_state =
+ pinctrl_lookup_state(hdmi->pinctrl, "default");
+
+ if (IS_ERR(hdmi->default_state) ||
+ IS_ERR(hdmi->unwedge_state)) {
+ if (!IS_ERR(hdmi->unwedge_state))
+ dev_warn(dev,
+ "Unwedge requires default pinctrl\n");
+ hdmi->default_state = NULL;
+ hdmi->unwedge_state = NULL;
+ }
+ }
+
hdmi->ddc = dw_hdmi_i2c_adapter(hdmi);
if (IS_ERR(hdmi->ddc))
hdmi->ddc = NULL;
@@ -2620,10 +2719,6 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.of_node = pdev->dev.of_node;
#endif
- dw_hdmi_setup_i2c(hdmi);
- if (hdmi->phy.ops->setup_hpd)
- hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data);
-
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
@@ -2676,10 +2771,6 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->cec = platform_device_register_full(&pdevinfo);
}
- /* Reset HDMI DDC I2C master controller and mute I2CM interrupts */
- if (hdmi->i2c)
- dw_hdmi_i2c_init(hdmi);
-
return hdmi;
err_iahb:
@@ -2783,6 +2874,12 @@ void dw_hdmi_unbind(struct dw_hdmi *hdmi)
}
EXPORT_SYMBOL_GPL(dw_hdmi_unbind);
+void dw_hdmi_resume(struct dw_hdmi *hdmi)
+{
+ dw_hdmi_init_hw(hdmi);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_resume);
+
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
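
With hardware init consolidated in dw_hdmi_init_hw(), the new dw_hdmi_resume() export gives platform glue a one-call re-init path after system suspend. A hedged sketch of the expected wiring (the foo_* names are hypothetical):

static int foo_hdmi_resume(struct device *dev)
{
	struct foo_hdmi *priv = dev_get_drvdata(dev);

	/* Re-run IH mute, DDC I2CM and HPD setup after power loss. */
	dw_hdmi_resume(priv->dw_hdmi);

	return 0;
}

static const struct dev_pm_ops foo_hdmi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(NULL, foo_hdmi_resume)
};
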
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index e915ae8c9a92..281c58bab1a1 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -15,15 +15,18 @@
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/bridge/dw_mipi_dsi.h>
-#include <video/mipi_display.h>
#define HWVER_131 0x31333100 /* IP version 1.31 */
@@ -775,6 +778,10 @@ static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+ const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
+
+ if (phy_ops->power_off)
+ phy_ops->power_off(dsi->plat_data->priv_data);
/*
* Switch to command mode before panel-bridge post_disable &
@@ -874,11 +881,15 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+ const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
/* Switch to video mode for panel-bridge enable & panel enable */
dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
if (dsi->slave)
dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO);
+
+ if (phy_ops->power_on)
+ phy_ops->power_on(dsi->plat_data->priv_data);
}
static enum drm_mode_status
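
The two new optional phy_ops callbacks let glue drivers power their D-PHY down across a disable and back up once video mode is restored. A sketch of a platform driver filling them in via the generic PHY framework (all foo_* names hypothetical; .init and .get_lane_mbps omitted for brevity):

static int foo_dsi_phy_power_on(void *priv_data)
{
	struct foo_dsi *dsi = priv_data;

	/* e.g. enable the vendor D-PHY through the generic PHY API */
	return phy_power_on(dsi->dphy);
}

static void foo_dsi_phy_power_off(void *priv_data)
{
	struct foo_dsi *dsi = priv_data;

	phy_power_off(dsi->dphy);
}

static const struct dw_mipi_dsi_phy_ops foo_dsi_phy_ops = {
	.power_on = foo_dsi_phy_power_on,
	.power_off = foo_dsi_phy_power_off,
};
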
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index a20e454ddd64..170f162ffa55 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -7,18 +7,22 @@
* Maciej Purski <m.purski@samsung.com>
*/
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of_graph.h>
-#include <linux/regulator/consumer.h>
-#include <video/mipi_display.h>
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 4655bb1eb88f..13ade28a36a8 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -62,6 +62,7 @@
/* System */
#define TC_IDREG 0x0500
+#define SYSSTAT 0x0508
#define SYSCTRL 0x0510
#define DP0_AUDSRC_NO_INPUT (0 << 3)
#define DP0_AUDSRC_I2S_RX (1 << 3)
@@ -69,6 +70,19 @@
#define DP0_VIDSRC_DSI_RX (1 << 0)
#define DP0_VIDSRC_DPI_RX (2 << 0)
#define DP0_VIDSRC_COLOR_BAR (3 << 0)
+#define GPIOM 0x0540
+#define GPIOC 0x0544
+#define GPIOO 0x0548
+#define GPIOI 0x054c
+#define INTCTL_G 0x0560
+#define INTSTS_G 0x0564
+
+#define INT_SYSERR BIT(16)
+#define INT_GPIO_H(x) (1 << (x == 0 ? 2 : 10))
+#define INT_GPIO_LC(x) (1 << (x == 0 ? 3 : 11))
+
+#define INT_GP0_LCNT 0x0584
+#define INT_GP1_LCNT 0x0588
/* Control */
#define DP0CTL 0x0600
@@ -177,11 +191,8 @@ module_param_named(test, tc_test_pattern, bool, 0644);
struct tc_edp_link {
struct drm_dp_link base;
u8 assr;
- int scrambler_dis;
- int spread;
- int coding8b10b;
- u8 swing;
- u8 preemp;
+ bool scrambler_dis;
+ bool spread;
};
struct tc_data {
@@ -199,7 +210,7 @@ struct tc_data {
/* display edid */
struct edid *edid;
/* current mode */
- const struct drm_display_mode *mode;
+ struct drm_display_mode mode;
u32 rev;
u8 assr;
@@ -207,6 +218,12 @@ struct tc_data {
struct gpio_desc *sd_gpio;
struct gpio_desc *reset_gpio;
struct clk *refclk;
+
+ /* do we have an IRQ? */
+ bool have_irq;
+
+ /* HPD pin number (0 or 1) or -ENODEV */
+ int hpd_pin;
};
static inline struct tc_data *aux_to_tc(struct drm_dp_aux *a)
@@ -277,14 +294,17 @@ static int tc_aux_get_status(struct tc_data *tc, u8 *reply)
ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &value);
if (ret < 0)
return ret;
+
if (value & AUX_BUSY) {
- if (value & AUX_TIMEOUT) {
- dev_err(tc->dev, "i2c access timeout!\n");
- return -ETIMEDOUT;
- }
+ dev_err(tc->dev, "aux busy!\n");
return -EBUSY;
}
+ if (value & AUX_TIMEOUT) {
+ dev_err(tc->dev, "aux access timeout!\n");
+ return -ETIMEDOUT;
+ }
+
*reply = (value & AUX_STATUS_MASK) >> AUX_STATUS_SHIFT;
return 0;
}
@@ -378,13 +398,10 @@ static u32 tc_srcctrl(struct tc_data *tc)
* No training pattern, skew lane 1 data by two LSCLK cycles with
* respect to lane 0 data, AutoCorrect Mode = 0
*/
- u32 reg = DP0_SRCCTRL_NOTP | DP0_SRCCTRL_LANESKEW;
+ u32 reg = DP0_SRCCTRL_NOTP | DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_EN810B;
if (tc->link.scrambler_dis)
reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */
- if (tc->link.coding8b10b)
- /* Enable 8/10B Encoder (TxData[19:16] not used) */
- reg |= DP0_SRCCTRL_EN810B;
if (tc->link.spread)
reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */
if (tc->link.base.num_lanes == 2)
@@ -536,7 +553,6 @@ static int tc_aux_link_setup(struct tc_data *tc)
unsigned long rate;
u32 value;
int ret;
- u32 dp_phy_ctrl;
rate = clk_get_rate(tc->refclk);
switch (rate) {
@@ -561,10 +577,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
- dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
- if (tc->link.base.num_lanes == 2)
- dp_phy_ctrl |= PHY_2LANE;
- tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | PHY_A0_EN);
/*
* Initially PLLs are in bypass. Force PLL parameter update,
@@ -581,8 +594,9 @@ static int tc_aux_link_setup(struct tc_data *tc)
if (ret == -ETIMEDOUT) {
dev_err(tc->dev, "Timeout waiting for PHY to become ready");
return ret;
- } else if (ret)
+ } else if (ret) {
goto err;
+ }
/* Setup AUX link */
tc_write(DP0_AUXCFG1, AUX_RX_FILTER_EN |
@@ -618,13 +632,13 @@ static int tc_get_display_props(struct tc_data *tc)
ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
if (ret < 0)
goto err_dpcd_read;
- tc->link.spread = tmp[0] & BIT(0); /* 0.5% down spread */
+ tc->link.spread = tmp[0] & DP_MAX_DOWNSPREAD_0_5;
ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, tmp);
if (ret < 0)
goto err_dpcd_read;
- tc->link.coding8b10b = tmp[0] & BIT(0);
- tc->link.scrambler_dis = 0;
+
+ tc->link.scrambler_dis = false;
/* read assr */
ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, tmp);
if (ret < 0)
@@ -637,7 +651,9 @@ static int tc_get_display_props(struct tc_data *tc)
tc->link.base.num_lanes,
(tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
"enhanced" : "non-enhanced");
- dev_dbg(tc->dev, "ANSI 8B/10B: %d\n", tc->link.coding8b10b);
+ dev_dbg(tc->dev, "Downspread: %s, scrambler: %s\n",
+ tc->link.spread ? "0.5%" : "0.0%",
+ tc->link.scrambler_dis ? "disabled" : "enabled");
dev_dbg(tc->dev, "Display ASSR: %d, TC358767 ASSR: %d\n",
tc->link.assr, tc->assr);
@@ -735,89 +751,29 @@ err:
return ret;
}
-static int tc_link_training(struct tc_data *tc, int pattern)
+static int tc_wait_link_training(struct tc_data *tc)
{
- const char * const *errors;
- u32 srcctrl = tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
- DP0_SRCCTRL_AUTOCORRECT;
- int timeout;
- int retry;
+ u32 timeout = 1000;
u32 value;
int ret;
- if (pattern == DP_TRAINING_PATTERN_1) {
- srcctrl |= DP0_SRCCTRL_TP1;
- errors = training_pattern1_errors;
- } else {
- srcctrl |= DP0_SRCCTRL_TP2;
- errors = training_pattern2_errors;
- }
-
- /* Set DPCD 0x102 for Training Part 1 or 2 */
- tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE | pattern);
-
- tc_write(DP0_LTLOOPCTRL,
- (0x0f << 28) | /* Defer Iteration Count */
- (0x0f << 24) | /* Loop Iteration Count */
- (0x0d << 0)); /* Loop Timer Delay */
-
- retry = 5;
do {
- /* Set DP0 Training Pattern */
- tc_write(DP0_SRCCTRL, srcctrl);
-
- /* Enable DP0 to start Link Training */
- tc_write(DP0CTL, DP_EN);
-
- /* wait */
- timeout = 1000;
- do {
- tc_read(DP0_LTSTAT, &value);
- udelay(1);
- } while ((!(value & LT_LOOPDONE)) && (--timeout));
- if (timeout == 0) {
- dev_err(tc->dev, "Link training timeout!\n");
- } else {
- int pattern = (value >> 11) & 0x3;
- int error = (value >> 8) & 0x7;
-
- dev_dbg(tc->dev,
- "Link training phase %d done after %d uS: %s\n",
- pattern, 1000 - timeout, errors[error]);
- if (pattern == DP_TRAINING_PATTERN_1 && error == 0)
- break;
- if (pattern == DP_TRAINING_PATTERN_2) {
- value &= LT_CHANNEL1_EQ_BITS |
- LT_INTERLANE_ALIGN_DONE |
- LT_CHANNEL0_EQ_BITS;
- /* in case of two lanes */
- if ((tc->link.base.num_lanes == 2) &&
- (value == (LT_CHANNEL1_EQ_BITS |
- LT_INTERLANE_ALIGN_DONE |
- LT_CHANNEL0_EQ_BITS)))
- break;
- /* in case of one line */
- if ((tc->link.base.num_lanes == 1) &&
- (value == (LT_INTERLANE_ALIGN_DONE |
- LT_CHANNEL0_EQ_BITS)))
- break;
- }
- }
- /* restart */
- tc_write(DP0CTL, 0);
- usleep_range(10, 20);
- } while (--retry);
- if (retry == 0) {
- dev_err(tc->dev, "Failed to finish training phase %d\n",
- pattern);
+ udelay(1);
+ tc_read(DP0_LTSTAT, &value);
+ } while ((!(value & LT_LOOPDONE)) && (--timeout));
+
+ if (timeout == 0) {
+ dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
+ return -ETIMEDOUT;
}
- return 0;
+ return (value >> 8) & 0x7;
+
err:
return ret;
}
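
The open-coded LT_LOOPDONE busy-wait above keeps the patch minimal; the same ~1 ms poll could also be written with the regmap helper (a sketch, not part of this patch — note the helper may sleep between reads):

u32 value;
int ret;

ret = regmap_read_poll_timeout(tc->regmap, DP0_LTSTAT, value,
			       value & LT_LOOPDONE, 1, 1000);
if (ret)
	return ret;			/* -ETIMEDOUT on expiry */

return (value >> 8) & 0x7;		/* same status extraction as above */
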
-static int tc_main_link_setup(struct tc_data *tc)
+static int tc_main_link_enable(struct tc_data *tc)
{
struct drm_dp_aux *aux = &tc->aux;
struct device *dev = tc->dev;
@@ -828,9 +784,11 @@ static int tc_main_link_setup(struct tc_data *tc)
int ret;
u8 tmp[8];
- /* display mode should be set at this point */
- if (!tc->mode)
- return -EINVAL;
+ dev_dbg(tc->dev, "link enable\n");
+
+ tc_read(DP0CTL, &value);
+ if (WARN_ON(value & DP_EN))
+ tc_write(DP0CTL, 0);
tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
@@ -863,7 +821,6 @@ static int tc_main_link_setup(struct tc_data *tc)
if (tc->link.base.num_lanes == 2)
dp_phy_ctrl |= PHY_2LANE;
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
- msleep(100);
/* PLL setup */
tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
@@ -872,14 +829,6 @@ static int tc_main_link_setup(struct tc_data *tc)
tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
tc_wait_pll_lock(tc);
- /* PXL PLL setup */
- if (tc_test_pattern) {
- ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
- 1000 * tc->mode->clock);
- if (ret)
- goto err;
- }
-
/* Reset/Enable Main Links */
dp_phy_ctrl |= DP_PHY_RST | PHY_M1_RST | PHY_M0_RST;
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
@@ -925,9 +874,9 @@ static int tc_main_link_setup(struct tc_data *tc)
if (tmp[0] != tc->assr) {
dev_dbg(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n",
- tc->assr);
+ tc->assr);
/* trying with disabled scrambler */
- tc->link.scrambler_dis = 1;
+ tc->link.scrambler_dis = true;
}
}
@@ -939,19 +888,82 @@ static int tc_main_link_setup(struct tc_data *tc)
/* DOWNSPREAD_CTRL */
tmp[0] = tc->link.spread ? DP_SPREAD_AMP_0_5 : 0x00;
/* MAIN_LINK_CHANNEL_CODING_SET */
- tmp[1] = tc->link.coding8b10b ? DP_SET_ANSI_8B10B : 0x00;
+ tmp[1] = DP_SET_ANSI_8B10B;
ret = drm_dp_dpcd_write(aux, DP_DOWNSPREAD_CTRL, tmp, 2);
if (ret < 0)
goto err_dpcd_write;
- ret = tc_link_training(tc, DP_TRAINING_PATTERN_1);
- if (ret)
+ /* Reset voltage-swing & pre-emphasis */
+ tmp[0] = tmp[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
+ DP_TRAIN_PRE_EMPH_LEVEL_0;
+ ret = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, tmp, 2);
+ if (ret < 0)
+ goto err_dpcd_write;
+
+ /* Clock-Recovery */
+
+ /* Set DPCD 0x102 for Training Pattern 1 */
+ tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_1);
+
+ tc_write(DP0_LTLOOPCTRL,
+ (15 << 28) | /* Defer Iteration Count */
+ (15 << 24) | /* Loop Iteration Count */
+ (0xd << 0)); /* Loop Timer Delay */
+
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
+ DP0_SRCCTRL_AUTOCORRECT | DP0_SRCCTRL_TP1);
+
+ /* Enable DP0 to start Link Training */
+ tc_write(DP0CTL,
+ ((tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ? EF_EN : 0) |
+ DP_EN);
+
+ /* wait */
+ ret = tc_wait_link_training(tc);
+ if (ret < 0)
goto err;
- ret = tc_link_training(tc, DP_TRAINING_PATTERN_2);
- if (ret)
+ if (ret) {
+ dev_err(tc->dev, "Link training phase 1 failed: %s\n",
+ training_pattern1_errors[ret]);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* Channel Equalization */
+
+ /* Set DPCD 0x102 for Training Pattern 2 */
+ tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_2);
+
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
+ DP0_SRCCTRL_AUTOCORRECT | DP0_SRCCTRL_TP2);
+
+ /* wait */
+ ret = tc_wait_link_training(tc);
+ if (ret < 0)
goto err;
+ if (ret) {
+ dev_err(tc->dev, "Link training phase 2 failed: %s\n",
+ training_pattern2_errors[ret]);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /*
+ * Toshiba's documentation suggests first clearing DPCD 0x102 and then
+ * clearing the training pattern bit in DP0_SRCCTRL. Testing shows
+ * that the link sometimes drops if those steps are done in that order,
+ * but if the steps are done in reverse order, the link stays up.
+ *
+ * So we do the steps differently than documented here.
+ */
+
+ /* Clear Training Pattern, set AutoCorrect Mode = 1 */
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT);
+
/* Clear DPCD 0x102 */
/* Note: Can Not use DP0_SNKLTCTRL (0x06E4) short cut */
tmp[0] = tc->link.scrambler_dis ? DP_LINK_SCRAMBLING_DISABLE : 0x00;
@@ -959,47 +971,43 @@ static int tc_main_link_setup(struct tc_data *tc)
if (ret < 0)
goto err_dpcd_write;
- /* Clear Training Pattern, set AutoCorrect Mode = 1 */
- tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT);
+ /* Check link status */
+ ret = drm_dp_dpcd_read_link_status(aux, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
- /* Wait */
- timeout = 100;
- do {
- udelay(1);
- /* Read DPCD 0x202-0x207 */
- ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
- if (ret < 0)
- goto err_dpcd_read;
- } while ((--timeout) &&
- !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes)));
+ ret = 0;
- if (timeout == 0) {
- /* Read DPCD 0x200-0x201 */
- ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
- if (ret < 0)
- goto err_dpcd_read;
- dev_err(dev, "channel(s) EQ not ok\n");
- dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
- dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
- tmp[1]);
- dev_info(dev, "0x0202 LANE0_1_STATUS: 0x%02x\n", tmp[2]);
- dev_info(dev, "0x0204 LANE_ALIGN_STATUS_UPDATED: 0x%02x\n",
- tmp[4]);
- dev_info(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[5]);
- dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
- tmp[6]);
-
- return -EAGAIN;
+ value = tmp[0] & DP_CHANNEL_EQ_BITS;
+
+ if (value != DP_CHANNEL_EQ_BITS) {
+ dev_err(tc->dev, "Lane 0 failed: %x\n", value);
+ ret = -ENODEV;
}
- ret = tc_set_video_mode(tc, tc->mode);
- if (ret)
- goto err;
+ if (tc->link.base.num_lanes == 2) {
+ value = (tmp[0] >> 4) & DP_CHANNEL_EQ_BITS;
- /* Set M/N */
- ret = tc_stream_clock_calc(tc);
- if (ret)
+ if (value != DP_CHANNEL_EQ_BITS) {
+ dev_err(tc->dev, "Lane 1 failed: %x\n", value);
+ ret = -ENODEV;
+ }
+
+ if (!(tmp[2] & DP_INTERLANE_ALIGN_DONE)) {
+ dev_err(tc->dev, "Interlane align failed\n");
+ ret = -ENODEV;
+ }
+ }
+
+ if (ret) {
+ dev_err(dev, "0x0202 LANE0_1_STATUS: 0x%02x\n", tmp[0]);
+ dev_err(dev, "0x0203 LANE2_3_STATUS 0x%02x\n", tmp[1]);
+ dev_err(dev, "0x0204 LANE_ALIGN_STATUS_UPDATED: 0x%02x\n", tmp[2]);
+ dev_err(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[3]);
+ dev_err(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", tmp[4]);
+ dev_err(dev, "0x0207 ADJUST_REQUEST_LANE2_3: 0x%02x\n", tmp[5]);
goto err;
+ }
return 0;
err_dpcd_read:
@@ -1011,39 +1019,84 @@ err:
return ret;
}
-static int tc_main_link_stream(struct tc_data *tc, int state)
+static int tc_main_link_disable(struct tc_data *tc)
+{
+ int ret;
+
+ dev_dbg(tc->dev, "link disable\n");
+
+ tc_write(DP0_SRCCTRL, 0);
+ tc_write(DP0CTL, 0);
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_stream_enable(struct tc_data *tc)
{
int ret;
u32 value;
- dev_dbg(tc->dev, "stream: %d\n", state);
+ dev_dbg(tc->dev, "enable video stream\n");
- if (state) {
- value = VID_MN_GEN | DP_EN;
- if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- value |= EF_EN;
- tc_write(DP0CTL, value);
- /*
- * VID_EN assertion should be delayed by at least N * LSCLK
- * cycles from the time VID_MN_GEN is enabled in order to
- * generate stable values for VID_M. LSCLK is 270 MHz or
- * 162 MHz, VID_N is set to 32768 in tc_stream_clock_calc(),
- * so a delay of at least 203 us should suffice.
- */
- usleep_range(500, 1000);
- value |= VID_EN;
- tc_write(DP0CTL, value);
- /* Set input interface */
- value = DP0_AUDSRC_NO_INPUT;
- if (tc_test_pattern)
- value |= DP0_VIDSRC_COLOR_BAR;
- else
- value |= DP0_VIDSRC_DPI_RX;
- tc_write(SYSCTRL, value);
- } else {
- tc_write(DP0CTL, 0);
+ /* PXL PLL setup */
+ if (tc_test_pattern) {
+ ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
+ 1000 * tc->mode.clock);
+ if (ret)
+ goto err;
}
+ ret = tc_set_video_mode(tc, &tc->mode);
+ if (ret)
+ return ret;
+
+ /* Set M/N */
+ ret = tc_stream_clock_calc(tc);
+ if (ret)
+ return ret;
+
+ value = VID_MN_GEN | DP_EN;
+ if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ value |= EF_EN;
+ tc_write(DP0CTL, value);
+ /*
+ * VID_EN assertion should be delayed by at least N * LSCLK
+ * cycles from the time VID_MN_GEN is enabled in order to
+ * generate stable values for VID_M. LSCLK is 270 MHz or
+ * 162 MHz, VID_N is set to 32768 in tc_stream_clock_calc(),
+ * so a delay of at least 203 us should suffice.
+ */
+ usleep_range(500, 1000);
+ value |= VID_EN;
+ tc_write(DP0CTL, value);
+ /* Set input interface */
+ value = DP0_AUDSRC_NO_INPUT;
+ if (tc_test_pattern)
+ value |= DP0_VIDSRC_COLOR_BAR;
+ else
+ value |= DP0_VIDSRC_DPI_RX;
+ tc_write(SYSCTRL, value);
+
+ return 0;
+err:
+ return ret;
+}
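
For reference, the bound in the VID_EN delay comment inside tc_stream_enable() falls straight out of the M/N generator parameters: with VID_N = 32768 and the slower 162 MHz link-symbol clock, t_min = 32768 / 162 MHz ≈ 202 µs, rounded up to the quoted 203 µs, so the usleep_range(500, 1000) leaves better than 2x margin.
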
+
+static int tc_stream_disable(struct tc_data *tc)
+{
+ int ret;
+ u32 val;
+
+ dev_dbg(tc->dev, "disable video stream\n");
+
+ tc_read(DP0CTL, &val);
+ val &= ~VID_EN;
+ tc_write(DP0CTL, val);
+
+ tc_pxl_pll_dis(tc);
+
return 0;
err:
return ret;
@@ -1061,15 +1114,22 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
- ret = tc_main_link_setup(tc);
+ ret = tc_get_display_props(tc);
if (ret < 0) {
- dev_err(tc->dev, "main link setup error: %d\n", ret);
+ dev_err(tc->dev, "failed to read display props: %d\n", ret);
return;
}
- ret = tc_main_link_stream(tc, 1);
+ ret = tc_main_link_enable(tc);
+ if (ret < 0) {
+ dev_err(tc->dev, "main link enable error: %d\n", ret);
+ return;
+ }
+
+ ret = tc_stream_enable(tc);
if (ret < 0) {
dev_err(tc->dev, "main link stream start error: %d\n", ret);
+ tc_main_link_disable(tc);
return;
}
@@ -1083,9 +1143,13 @@ static void tc_bridge_disable(struct drm_bridge *bridge)
drm_panel_disable(tc->panel);
- ret = tc_main_link_stream(tc, 0);
+ ret = tc_stream_disable(tc);
if (ret < 0)
dev_err(tc->dev, "main link stream stop error: %d\n", ret);
+
+ ret = tc_main_link_disable(tc);
+ if (ret < 0)
+ dev_err(tc->dev, "main link disable error: %d\n", ret);
}
static void tc_bridge_post_disable(struct drm_bridge *bridge)
@@ -1107,10 +1171,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
return true;
}
-static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
{
- struct tc_data *tc = connector_to_tc(connector);
+ struct tc_data *tc = bridge_to_tc(bridge);
u32 req, avail;
u32 bits_per_pixel = 24;
@@ -1133,7 +1197,7 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge,
{
struct tc_data *tc = bridge_to_tc(bridge);
- tc->mode = mode;
+ tc->mode = *mode;
}
static int tc_connector_get_modes(struct drm_connector *connector)
@@ -1141,6 +1205,13 @@ static int tc_connector_get_modes(struct drm_connector *connector)
struct tc_data *tc = connector_to_tc(connector);
struct edid *edid;
unsigned int count;
+ int ret;
+
+ ret = tc_get_display_props(tc);
+ if (ret < 0) {
+ dev_err(tc->dev, "failed to read display props: %d\n", ret);
+ return 0;
+ }
if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
count = tc->panel->funcs->get_modes(tc->panel);
@@ -1161,29 +1232,40 @@ static int tc_connector_get_modes(struct drm_connector *connector)
return count;
}
-static void tc_connector_set_polling(struct tc_data *tc,
- struct drm_connector *connector)
-{
- /* TODO: add support for HPD */
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
-}
+static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
+ .get_modes = tc_connector_get_modes,
+};
-static struct drm_encoder *
-tc_connector_best_encoder(struct drm_connector *connector)
+static enum drm_connector_status tc_connector_detect(struct drm_connector *connector,
+ bool force)
{
struct tc_data *tc = connector_to_tc(connector);
+ bool conn;
+ u32 val;
+ int ret;
- return tc->bridge.encoder;
-}
+ if (tc->hpd_pin < 0) {
+ if (tc->panel)
+ return connector_status_connected;
+ else
+ return connector_status_unknown;
+ }
-static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
- .get_modes = tc_connector_get_modes,
- .mode_valid = tc_connector_mode_valid,
- .best_encoder = tc_connector_best_encoder,
-};
+ tc_read(GPIOI, &val);
+
+ conn = val & BIT(tc->hpd_pin);
+
+ if (conn)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+
+err:
+ return connector_status_unknown;
+}
static const struct drm_connector_funcs tc_connector_funcs = {
+ .detect = tc_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -1198,7 +1280,7 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
struct drm_device *drm = bridge->dev;
int ret;
- /* Create eDP connector */
+ /* Create DP/eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
tc->panel ? DRM_MODE_CONNECTOR_eDP :
@@ -1206,6 +1288,15 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
if (ret)
return ret;
+ /* Don't poll if we don't have HPD connected */
+ if (tc->hpd_pin >= 0) {
+ if (tc->have_irq)
+ tc->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ tc->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
+
if (tc->panel)
drm_panel_attach(tc->panel, &tc->connector);
@@ -1222,6 +1313,7 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
static const struct drm_bridge_funcs tc_bridge_funcs = {
.attach = tc_bridge_attach,
+ .mode_valid = tc_mode_valid,
.mode_set = tc_bridge_mode_set,
.pre_enable = tc_bridge_pre_enable,
.enable = tc_bridge_enable,
@@ -1241,6 +1333,8 @@ static const struct regmap_range tc_volatile_ranges[] = {
regmap_reg_range(DP_PHY_CTRL, DP_PHY_CTRL),
regmap_reg_range(DP0_PLLCTRL, PXL_PLLCTRL),
regmap_reg_range(VFUEN0, VFUEN0),
+ regmap_reg_range(INTSTS_G, INTSTS_G),
+ regmap_reg_range(GPIOI, GPIOI),
};
static const struct regmap_access_table tc_volatile_table = {
@@ -1269,6 +1363,49 @@ static const struct regmap_config tc_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+static irqreturn_t tc_irq_handler(int irq, void *arg)
+{
+ struct tc_data *tc = arg;
+ u32 val;
+ int r;
+
+ r = regmap_read(tc->regmap, INTSTS_G, &val);
+ if (r)
+ return IRQ_NONE;
+
+ if (!val)
+ return IRQ_NONE;
+
+ if (val & INT_SYSERR) {
+ u32 stat = 0;
+
+ regmap_read(tc->regmap, SYSSTAT, &stat);
+
+ dev_err(tc->dev, "syserr %x\n", stat);
+ }
+
+ if (tc->hpd_pin >= 0 && tc->bridge.dev) {
+ /*
+ * H is triggered when the GPIO goes high.
+ *
+ * LC is triggered when the GPIO goes low and stays low for
+ * the duration of LCNT
+ */
+ bool h = val & INT_GPIO_H(tc->hpd_pin);
+ bool lc = val & INT_GPIO_LC(tc->hpd_pin);
+
+ dev_dbg(tc->dev, "GPIO%d: %s %s\n", tc->hpd_pin,
+ h ? "H" : "", lc ? "LC" : "");
+
+ if (h || lc)
+ drm_kms_helper_hotplug_event(tc->bridge.dev);
+ }
+
+ regmap_write(tc->regmap, INTSTS_G, val);
+
+ return IRQ_HANDLED;
+}
+
static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
@@ -1320,6 +1457,33 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
return ret;
}
+ ret = of_property_read_u32(dev->of_node, "toshiba,hpd-pin",
+ &tc->hpd_pin);
+ if (ret) {
+ tc->hpd_pin = -ENODEV;
+ } else {
+ if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
+ dev_err(dev, "failed to parse HPD number\n");
+ return -EINVAL;
+ }
+ }
+
+ if (client->irq > 0) {
+ /* enable SysErr */
+ regmap_write(tc->regmap, INTCTL_G, INT_SYSERR);
+
+ ret = devm_request_threaded_irq(dev, client->irq,
+ NULL, tc_irq_handler,
+ IRQF_ONESHOT,
+ "tc358767-irq", tc);
+ if (ret) {
+ dev_err(dev, "failed to register dp interrupt\n");
+ return ret;
+ }
+
+ tc->have_irq = true;
+ }
+
ret = regmap_read(tc->regmap, TC_IDREG, &tc->rev);
if (ret) {
dev_err(tc->dev, "can not read device ID: %d\n", ret);
@@ -1333,6 +1497,22 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */
+ if (tc->hpd_pin >= 0) {
+ u32 lcnt_reg = tc->hpd_pin == 0 ? INT_GP0_LCNT : INT_GP1_LCNT;
+ u32 h_lc = INT_GPIO_H(tc->hpd_pin) | INT_GPIO_LC(tc->hpd_pin);
+
+ /* Set LCNT to 2ms */
+ regmap_write(tc->regmap, lcnt_reg,
+ clk_get_rate(tc->refclk) * 2 / 1000);
+ /* We need the "alternate" mode for HPD */
+ regmap_write(tc->regmap, GPIOM, BIT(tc->hpd_pin));
+
+ if (tc->have_irq) {
+ /* enable H & LC */
+ regmap_update_bits(tc->regmap, INTCTL_G, h_lc, h_lc);
+ }
+ }
+
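
For a feel of the LCNT value programmed above: assuming a 19.2 MHz reference clock (one of the rates the driver's PLL setup accepts), clk_get_rate() * 2 / 1000 = 19200000 * 2 / 1000 = 38400 cycles, i.e. HPD must stay low for a full 2 ms before the LC interrupt fires, which debounces short glitches on the pin.
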
ret = tc_aux_link_setup(tc);
if (ret)
return ret;
@@ -1345,12 +1525,6 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (ret)
return ret;
- ret = tc_get_display_props(tc);
- if (ret)
- goto err_unregister_aux;
-
- tc_connector_set_polling(tc, &tc->connector);
-
tc->bridge.funcs = &tc_bridge_funcs;
tc->bridge.of_node = dev->of_node;
drm_bridge_add(&tc->bridge);
@@ -1358,9 +1532,6 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, tc);
return 0;
-err_unregister_aux:
- drm_dp_aux_unregister(&tc->aux);
- return ret;
}
static int tc_remove(struct i2c_client *client)
@@ -1370,8 +1541,6 @@ static int tc_remove(struct i2c_client *client)
drm_bridge_remove(&tc->bridge);
drm_dp_aux_unregister(&tc->aux);
- tc_pxl_pll_dis(tc);
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index b083a740565c..3d74129b2995 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -5,15 +5,17 @@
* Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
*/
-#include <drm/drmP.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_panel.h>
-
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_panel.h>
+
enum thc63_ports {
THC63_LVDS_IN0,
THC63_LVDS_IN1,
@@ -31,6 +33,8 @@ struct thc63_dev {
struct drm_bridge bridge;
struct drm_bridge *next;
+
+ struct drm_bridge_timings timings;
};
static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
@@ -48,15 +52,28 @@ static int thc63_attach(struct drm_bridge *bridge)
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
+ struct thc63_dev *thc63 = to_thc63(bridge);
+ unsigned int min_freq;
+ unsigned int max_freq;
+
/*
- * The THC63LVD1024 clock frequency range is 8 to 135 MHz in single-in
- * mode. Note that the limits are different in dual-in, single-out mode,
- * and will need to be adjusted accordingly.
+ * The THC63LVD1024 pixel rate range is 8 to 135 MHz in all modes but
+ * dual-in, single-out where it is 40 to 150 MHz. As dual-in, dual-out
+ * isn't supported by the driver yet, simply derive the limits from the
+ * input mode.
*/
- if (mode->clock < 8000)
+ if (thc63->timings.dual_link) {
+ min_freq = 40000;
+ max_freq = 150000;
+ } else {
+ min_freq = 8000;
+ max_freq = 135000;
+ }
+
+ if (mode->clock < min_freq)
return MODE_CLOCK_LOW;
- if (mode->clock > 135000)
+ if (mode->clock > max_freq)
return MODE_CLOCK_HIGH;
return MODE_OK;
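
As a concrete consequence of these limits: a 1080p60 mode with its 148500 kHz pixel clock is rejected with MODE_CLOCK_HIGH in single-link mode, but passes once dual-in, single-out operation has been detected, since 135000 < 148500 <= 150000.
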
@@ -101,19 +118,19 @@ static const struct drm_bridge_funcs thc63_bridge_func = {
static int thc63_parse_dt(struct thc63_dev *thc63)
{
- struct device_node *thc63_out;
+ struct device_node *endpoint;
struct device_node *remote;
- thc63_out = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
- THC63_RGB_OUT0, -1);
- if (!thc63_out) {
+ endpoint = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
+ THC63_RGB_OUT0, -1);
+ if (!endpoint) {
dev_err(thc63->dev, "Missing endpoint in port@%u\n",
THC63_RGB_OUT0);
return -ENODEV;
}
- remote = of_graph_get_remote_port_parent(thc63_out);
- of_node_put(thc63_out);
+ remote = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
if (!remote) {
dev_err(thc63->dev, "Endpoint in port@%u unconnected\n",
THC63_RGB_OUT0);
@@ -132,6 +149,22 @@ static int thc63_parse_dt(struct thc63_dev *thc63)
if (!thc63->next)
return -EPROBE_DEFER;
+ endpoint = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
+ THC63_LVDS_IN1, -1);
+ if (endpoint) {
+ remote = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+
+ if (remote) {
+ if (of_device_is_available(remote))
+ thc63->timings.dual_link = true;
+ of_node_put(remote);
+ }
+ }
+
+ dev_dbg(thc63->dev, "operating in %s-link mode\n",
+ thc63->timings.dual_link ? "dual" : "single");
+
return 0;
}
@@ -188,6 +221,7 @@ static int thc63_probe(struct platform_device *pdev)
thc63->bridge.driver_private = thc63;
thc63->bridge.of_node = pdev->dev.of_node;
thc63->bridge.funcs = &thc63_bridge_func;
+ thc63->bridge.timings = &thc63->timings;
drm_bridge_add(&thc63->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index f72ee137e5f1..b77a52d05061 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -3,23 +3,25 @@
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#define SN_DEVICE_REV_REG 0x08
#define SN_DPPLL_SRC_REG 0x0A
#define DPPLL_CLK_SRC_DSICLK BIT(0)
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 8b0e71bd3ca7..bfb21b5eefe1 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -11,15 +11,15 @@
#include <linux/delay.h>
#include <linux/fwnode.h>
#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#define HOTPLUG_DEBOUNCE_MS 1100
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
deleted file mode 100644
index e6b98467a428..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors: Dave Airlie <airlied@redhat.com>
- */
-#include <drm/drmP.h>
-#include <drm/ttm/ttm_page_alloc.h>
-
-#include "cirrus_drv.h"
-
-static inline struct cirrus_device *
-cirrus_bdev(struct ttm_bo_device *bd)
-{
- return container_of(bd, struct cirrus_device, ttm.bdev);
-}
-
-static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
-{
- struct cirrus_bo *bo;
-
- bo = container_of(tbo, struct cirrus_bo, bo);
-
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
-{
- if (bo->destroy == &cirrus_bo_ttm_destroy)
- return true;
- return false;
-}
-
-static int
-cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
-}
-
-static void
-cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
-{
- struct cirrus_bo *cirrusbo = cirrus_bo(bo);
-
- if (!cirrus_ttm_bo_is_cirrus_bo(bo))
- return;
-
- cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
- *pl = cirrusbo->placement;
-}
-
-static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct cirrus_bo *cirrusbo = cirrus_bo(bo);
-
- return drm_vma_node_verify_access(&cirrusbo->gem.vma_node,
- filp->private_data);
-}
-
-static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct cirrus_device *cirrus = cirrus_bdev(bdev);
-
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- /* system memory */
- return 0;
- case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
- mem->bus.is_iomem = true;
- break;
- default:
- return -EINVAL;
- break;
- }
- return 0;
-}
-
-static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
-{
-}
-
-static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
-{
- ttm_tt_fini(tt);
- kfree(tt);
-}
-
-static struct ttm_backend_func cirrus_tt_backend_func = {
- .destroy = &cirrus_ttm_backend_destroy,
-};
-
-
-static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct ttm_tt *tt;
-
- tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
- if (tt == NULL)
- return NULL;
- tt->func = &cirrus_tt_backend_func;
- if (ttm_tt_init(tt, bo, page_flags)) {
- kfree(tt);
- return NULL;
- }
- return tt;
-}
-
-struct ttm_bo_driver cirrus_bo_driver = {
- .ttm_tt_create = cirrus_ttm_tt_create,
- .init_mem_type = cirrus_bo_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = cirrus_bo_evict_flags,
- .move = NULL,
- .verify_access = cirrus_bo_verify_access,
- .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
- .io_mem_free = &cirrus_ttm_io_mem_free,
-};
-
-int cirrus_mm_init(struct cirrus_device *cirrus)
-{
- int ret;
- struct drm_device *dev = cirrus->dev;
- struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
-
- ret = ttm_bo_device_init(&cirrus->ttm.bdev,
- &cirrus_bo_driver,
- dev->anon_inode->i_mapping,
- true);
- if (ret) {
- DRM_ERROR("Error initialising bo driver; %d\n", ret);
- return ret;
- }
-
- ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
- cirrus->mc.vram_size >> PAGE_SHIFT);
- if (ret) {
- DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
- return ret;
- }
-
- arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
-
- cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
-
- cirrus->mm_inited = true;
- return 0;
-}
-
-void cirrus_mm_fini(struct cirrus_device *cirrus)
-{
- struct drm_device *dev = cirrus->dev;
-
- if (!cirrus->mm_inited)
- return;
-
- ttm_bo_device_release(&cirrus->ttm.bdev);
-
- arch_phys_wc_del(cirrus->fb_mtrr);
- cirrus->fb_mtrr = 0;
- arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
-}
-
-void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
-{
- u32 c = 0;
- unsigned i;
- bo->placement.placement = bo->placements;
- bo->placement.busy_placement = bo->placements;
- if (domain & TTM_PL_FLAG_VRAM)
- bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
- if (domain & TTM_PL_FLAG_SYSTEM)
- bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
- if (!c)
- bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
- bo->placement.num_placement = c;
- bo->placement.num_busy_placement = c;
- for (i = 0; i < c; ++i) {
- bo->placements[i].fpfn = 0;
- bo->placements[i].lpfn = 0;
- }
-}
-
-int cirrus_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct cirrus_bo **pcirrusbo)
-{
- struct cirrus_device *cirrus = dev->dev_private;
- struct cirrus_bo *cirrusbo;
- size_t acc_size;
- int ret;
-
- cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
- if (!cirrusbo)
- return -ENOMEM;
-
- ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
- if (ret) {
- kfree(cirrusbo);
- return ret;
- }
-
- cirrusbo->bo.bdev = &cirrus->ttm.bdev;
-
- cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
-
- acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
- sizeof(struct cirrus_bo));
-
- ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
- ttm_bo_type_device, &cirrusbo->placement,
- align >> PAGE_SHIFT, false, acc_size,
- NULL, NULL, cirrus_bo_ttm_destroy);
- if (ret)
- return ret;
-
- *pcirrusbo = cirrusbo;
- return 0;
-}
-
-static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
-{
- return bo->bo.offset;
-}
-
-int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- if (bo->pin_count) {
- bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = cirrus_bo_gpu_offset(bo);
- }
-
- cirrus_ttm_placement(bo, pl_flag);
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret)
- return ret;
-
- bo->pin_count = 1;
- if (gpu_addr)
- *gpu_addr = cirrus_bo_gpu_offset(bo);
- return 0;
-}
-
-int cirrus_bo_push_sysram(struct cirrus_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
-
- if (bo->kmap.virtual)
- ttm_bo_kunmap(&bo->kmap);
-
- cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
- for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret) {
- DRM_ERROR("pushing to VRAM failed\n");
- return ret;
- }
- return 0;
-}
-
-int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct cirrus_device *cirrus = file_priv->minor->dev->dev_private;
-
- return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
-}
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 40fba1c04dfc..117b8ee98243 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -31,13 +31,20 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
-#include "drm_legacy.h"
#include <asm/agp.h>
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
+#include "drm_legacy.h"
+
/**
* Get AGP information.
*
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f4924cb7f495..419381abbdd1 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -26,13 +26,18 @@
*/
-#include <drm/drmP.h>
+#include <linux/sync_file.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
-#include <linux/sync_file.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -379,6 +384,7 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
drm_printf(p, "\tenable=%d\n", state->enable);
drm_printf(p, "\tactive=%d\n", state->active);
+ drm_printf(p, "\tself_refresh_active=%d\n", state->self_refresh_active);
drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
@@ -842,6 +848,75 @@ drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
/**
+ * drm_atomic_get_old_connector_for_encoder - Get old connector for an encoder
+ * @state: Atomic state
+ * @encoder: The encoder to fetch the connector state for
+ *
+ * This function finds and returns the connector that was connected to @encoder
+ * as specified by the @state.
+ *
+ * If there is no connector in @state which previously had @encoder connected to
+ * it, this function will return NULL. While this may seem like an invalid use
+ * case, it is sometimes useful to differentiate commits which had no prior
+ * connectors attached to @encoder vs ones that did (and to inspect their
+ * state). This is especially true in enable hooks because the pipeline has
+ * changed.
+ *
+ * Returns: The old connector connected to @encoder, or NULL if the encoder is
+ * not connected.
+ */
+struct drm_connector *
+drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder)
+{
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ unsigned int i;
+
+ for_each_old_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->best_encoder == encoder)
+ return connector;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
+
+/**
+ * drm_atomic_get_new_connector_for_encoder - Get new connector for an encoder
+ * @state: Atomic state
+ * @encoder: The encoder to fetch the connector state for
+ *
+ * This function finds and returns the connector that will be connected to
+ * @encoder as specified by the @state.
+ *
+ * If there is no connector in @state which will have @encoder connected to it,
+ * this function will return NULL. While this may seem like an invalid use case,
+ * it is sometimes useful to differentiate commits which have no connectors
+ * attached to @encoder vs ones that do (and to inspect their state). This is
+ * especially true in disable hooks because the pipeline will change.
+ *
+ * Returns: The new connector connected to @encoder, or NULL if the encoder is
+ * not connected.
+ */
+struct drm_connector *
+drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder)
+{
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ unsigned int i;
+
+ for_each_new_connector_in_state(state, connector, conn_state, i) {
+ if (conn_state->best_encoder == encoder)
+ return connector;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
+
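
A sketch of the intended use from driver code that runs during an atomic commit (the foo_* wrapper is hypothetical; the getter itself is the new export above):

static void foo_log_encoder_connector(struct drm_atomic_state *state,
				      struct drm_encoder *encoder)
{
	struct drm_connector *connector =
		drm_atomic_get_new_connector_for_encoder(state, encoder);

	if (connector)
		DRM_DEBUG_KMS("encoder %s now feeds connector %s\n",
			      encoder->name, connector->name);
}
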
+/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
* @connector: connector to get state object for
@@ -925,6 +1000,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+ drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
if (state->writeback_job && state->writeback_job->fb)
@@ -1174,6 +1250,174 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
+/* used only by drm-client and the atomic-helper: */
+int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ int ret;
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret != 0)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ plane_state->crtc_x = 0;
+ plane_state->crtc_y = 0;
+ plane_state->crtc_w = 0;
+ plane_state->crtc_h = 0;
+ plane_state->src_x = 0;
+ plane_state->src_y = 0;
+ plane_state->src_w = 0;
+ plane_state->src_h = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_disable_plane);
+
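
A sketch of the intended use: fetch the plane's state within an atomic update and reset it to the fully-disabled layout (error handling shown, surrounding state setup elided):

struct drm_plane_state *plane_state;
int ret;

plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
	return PTR_ERR(plane_state);

ret = __drm_atomic_helper_disable_plane(plane, plane_state);
if (ret)
	return ret;
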
+static int update_output_state(struct drm_atomic_state *state,
+ struct drm_mode_set *set)
+{
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int ret, i;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ /* First disable all connectors on the target crtc. */
+ ret = drm_atomic_add_affected_connectors(state, set->crtc);
+ if (ret)
+ return ret;
+
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ if (new_conn_state->crtc == set->crtc) {
+ ret = drm_atomic_set_crtc_for_connector(new_conn_state,
+ NULL);
+ if (ret)
+ return ret;
+
+ /* Make sure legacy setCrtc always re-trains */
+ new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
+ }
+ }
+
+ /* Then set all connectors from set->connectors on the target crtc */
+ for (i = 0; i < set->num_connectors; i++) {
+ new_conn_state = drm_atomic_get_connector_state(state,
+ set->connectors[i]);
+ if (IS_ERR(new_conn_state))
+ return PTR_ERR(new_conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(new_conn_state,
+ set->crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ /*
+ * Don't update ->enable for the CRTC in the set_config request,
+ * since a mismatch would indicate a bug in the upper layers.
+ * The actual modeset code later on will catch any
+ * inconsistencies here.
+ */
+ if (crtc == set->crtc)
+ continue;
+
+ if (!new_crtc_state->connector_mask) {
+ ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
+ NULL);
+ if (ret < 0)
+ return ret;
+
+ new_crtc_state->active = false;
+ }
+ }
+
+ return 0;
+}
+
+/* used only by drm-client and the atomic-helper: */
+int __drm_atomic_helper_set_config(struct drm_mode_set *set,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *primary_state;
+ struct drm_crtc *crtc = set->crtc;
+ int hdisplay, vdisplay;
+ int ret;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+
+ if (!set->mode) {
+ WARN_ON(set->fb);
+ WARN_ON(set->num_connectors);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ if (ret != 0)
+ return ret;
+
+ crtc_state->active = false;
+
+ ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
+ if (ret != 0)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(primary_state, NULL);
+
+ goto commit;
+ }
+
+ WARN_ON(!set->fb);
+ WARN_ON(!set->num_connectors);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
+ if (ret != 0)
+ return ret;
+
+ crtc_state->active = true;
+
+ ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
+ if (ret != 0)
+ return ret;
+
+ drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+
+ drm_atomic_set_fb_for_plane(primary_state, set->fb);
+ primary_state->crtc_x = 0;
+ primary_state->crtc_y = 0;
+ primary_state->crtc_w = hdisplay;
+ primary_state->crtc_h = vdisplay;
+ primary_state->src_x = set->x << 16;
+ primary_state->src_y = set->y << 16;
+ if (drm_rotation_90_or_270(primary_state->rotation)) {
+ primary_state->src_w = vdisplay << 16;
+ primary_state->src_h = hdisplay << 16;
+ } else {
+ primary_state->src_w = hdisplay << 16;
+ primary_state->src_h = vdisplay << 16;
+ }
+
+commit:
+ ret = update_output_state(state, set);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_set_config);
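The expected calling pattern, mirrored by the drm_client code added later in this patch, is to fill a legacy &drm_mode_set, map it into an atomic state and then commit; a minimal fragment (set and state in scope, state->acquire_ctx set, -EDEADLK handling left to the caller):

    ret = __drm_atomic_helper_set_config(set, state);
    if (ret)
            return ret;

    /* Nothing touches the hardware until the state is committed */
    return drm_atomic_commit(state);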
+
void drm_atomic_print_state(const struct drm_atomic_state *state)
{
struct drm_printer p = drm_info_printer(state->dev->dev);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 22a5c617f670..aa16ea17ff9b 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -25,14 +25,18 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
-#include <drm/drmP.h>
+#include <linux/dma-fence.h>
+
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
+#include <drm/drm_self_refresh_helper.h>
+#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
-#include <drm/drm_damage_helper.h>
-#include <linux/dma-fence.h>
#include "drm_crtc_helper_internal.h"
#include "drm_crtc_internal.h"
@@ -683,7 +687,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
}
if (funcs->atomic_check)
- ret = funcs->atomic_check(connector, new_connector_state);
+ ret = funcs->atomic_check(connector, state);
if (ret)
return ret;
@@ -725,7 +729,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
continue;
if (funcs->atomic_check)
- ret = funcs->atomic_check(connector, new_connector_state);
+ ret = funcs->atomic_check(connector, state);
if (ret)
return ret;
}
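With this change a connector's atomic_check hook takes the full &drm_atomic_state and looks up the states it needs itself; a sketch of the adapted signature (foo_ names hypothetical):

    static int foo_connector_atomic_check(struct drm_connector *connector,
                                          struct drm_atomic_state *state)
    {
            struct drm_connector_state *new_state =
                    drm_atomic_get_new_connector_state(state, connector);
            /* The old connector state is now reachable as well */
            struct drm_connector_state *old_state =
                    drm_atomic_get_old_connector_state(state, connector);

            if (!new_state->crtc)
                    return 0;

            /* ... validate new_state against old_state here ... */
            return 0;
    }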
@@ -950,10 +954,33 @@ int drm_atomic_helper_check(struct drm_device *dev,
if (state->legacy_cursor_update)
state->async_update = !drm_atomic_helper_async_check(dev, state);
+ drm_self_refresh_helper_alter_state(state);
+
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);
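Because the self-refresh bookkeeping now runs inside drm_atomic_helper_check(), a driver wired up with the stock hooks inherits it with no extra code; a typical wiring, as a sketch (the fb_create choice is illustrative):

    static const struct drm_mode_config_funcs foo_mode_config_funcs = {
            .fb_create     = drm_gem_fb_create,
            .atomic_check  = drm_atomic_helper_check,
            .atomic_commit = drm_atomic_helper_commit,
    };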
+static bool
+crtc_needs_disable(struct drm_crtc_state *old_state,
+ struct drm_crtc_state *new_state)
+{
+ /*
+ * No new_state means the crtc is off, so the only criteria is whether
+ * it's currently active or in self refresh mode.
+ */
+ if (!new_state)
+ return drm_atomic_crtc_effectively_active(old_state);
+
+ /*
+ * We need to run through the crtc_funcs->disable() function if the crtc
+ * is currently on, if it's transitioning to self refresh mode, or if
+ * it's in self refresh mode and needs to be fully disabled.
+ */
+ return old_state->active ||
+ (old_state->self_refresh_active && !new_state->enable) ||
+ new_state->self_refresh_active;
+}
+
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
@@ -974,7 +1001,14 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
- if (!old_crtc_state->active ||
+ if (new_conn_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ old_state,
+ new_conn_state->crtc);
+ else
+ new_crtc_state = NULL;
+
+ if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
!drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
continue;
@@ -995,11 +1029,13 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
*/
- drm_bridge_disable(encoder->bridge);
+ drm_atomic_bridge_disable(encoder->bridge, old_state);
/* Right function depends upon target state. */
if (funcs) {
- if (new_conn_state->crtc && funcs->prepare)
+ if (funcs->atomic_disable)
+ funcs->atomic_disable(encoder, old_state);
+ else if (new_conn_state->crtc && funcs->prepare)
funcs->prepare(encoder);
else if (funcs->disable)
funcs->disable(encoder);
@@ -1007,7 +1043,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
- drm_bridge_post_disable(encoder->bridge);
+ drm_atomic_bridge_post_disable(encoder->bridge, old_state);
}
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -1018,7 +1054,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
- if (!old_crtc_state->active)
+ if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
continue;
funcs = crtc->helper_private;
@@ -1305,16 +1341,18 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
*/
- drm_bridge_pre_enable(encoder->bridge);
+ drm_atomic_bridge_pre_enable(encoder->bridge, old_state);
if (funcs) {
- if (funcs->enable)
+ if (funcs->atomic_enable)
+ funcs->atomic_enable(encoder, old_state);
+ else if (funcs->enable)
funcs->enable(encoder);
else if (funcs->commit)
funcs->commit(encoder);
}
- drm_bridge_enable(encoder->bridge);
+ drm_atomic_bridge_enable(encoder->bridge, old_state);
}
drm_atomic_helper_commit_writebacks(dev, old_state);
@@ -1423,7 +1461,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
ret = wait_event_timeout(dev->vblank[i].queue,
old_state->crtcs[i].last_vblank_count !=
drm_crtc_vblank_count(crtc),
- msecs_to_jiffies(50));
+ msecs_to_jiffies(100));
WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
crtc->base.id, crtc->name);
@@ -2843,95 +2881,6 @@ fail:
}
EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
-/* just used from fb-helper and atomic-helper: */
-int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
- struct drm_plane_state *plane_state)
-{
- int ret;
-
- ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
- if (ret != 0)
- return ret;
-
- drm_atomic_set_fb_for_plane(plane_state, NULL);
- plane_state->crtc_x = 0;
- plane_state->crtc_y = 0;
- plane_state->crtc_w = 0;
- plane_state->crtc_h = 0;
- plane_state->src_x = 0;
- plane_state->src_y = 0;
- plane_state->src_w = 0;
- plane_state->src_h = 0;
-
- return 0;
-}
-
-static int update_output_state(struct drm_atomic_state *state,
- struct drm_mode_set *set)
-{
- struct drm_device *dev = set->crtc->dev;
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- struct drm_connector *connector;
- struct drm_connector_state *new_conn_state;
- int ret, i;
-
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
- state->acquire_ctx);
- if (ret)
- return ret;
-
- /* First disable all connectors on the target crtc. */
- ret = drm_atomic_add_affected_connectors(state, set->crtc);
- if (ret)
- return ret;
-
- for_each_new_connector_in_state(state, connector, new_conn_state, i) {
- if (new_conn_state->crtc == set->crtc) {
- ret = drm_atomic_set_crtc_for_connector(new_conn_state,
- NULL);
- if (ret)
- return ret;
-
- /* Make sure legacy setCrtc always re-trains */
- new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
- }
- }
-
- /* Then set all connectors from set->connectors on the target crtc */
- for (i = 0; i < set->num_connectors; i++) {
- new_conn_state = drm_atomic_get_connector_state(state,
- set->connectors[i]);
- if (IS_ERR(new_conn_state))
- return PTR_ERR(new_conn_state);
-
- ret = drm_atomic_set_crtc_for_connector(new_conn_state,
- set->crtc);
- if (ret)
- return ret;
- }
-
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- /* Don't update ->enable for the CRTC in the set_config request,
- * since a mismatch would indicate a bug in the upper layers.
- * The actual modeset code later on will catch any
- * inconsistencies here. */
- if (crtc == set->crtc)
- continue;
-
- if (!new_crtc_state->connector_mask) {
- ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
- NULL);
- if (ret < 0)
- return ret;
-
- new_crtc_state->active = false;
- }
- }
-
- return 0;
-}
-
/**
* drm_atomic_helper_set_config - set a new config from userspace
* @set: mode set configuration
@@ -2976,81 +2925,6 @@ fail:
}
EXPORT_SYMBOL(drm_atomic_helper_set_config);
-/* just used from fb-helper and atomic-helper: */
-int __drm_atomic_helper_set_config(struct drm_mode_set *set,
- struct drm_atomic_state *state)
-{
- struct drm_crtc_state *crtc_state;
- struct drm_plane_state *primary_state;
- struct drm_crtc *crtc = set->crtc;
- int hdisplay, vdisplay;
- int ret;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- primary_state = drm_atomic_get_plane_state(state, crtc->primary);
- if (IS_ERR(primary_state))
- return PTR_ERR(primary_state);
-
- if (!set->mode) {
- WARN_ON(set->fb);
- WARN_ON(set->num_connectors);
-
- ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
- if (ret != 0)
- return ret;
-
- crtc_state->active = false;
-
- ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
- if (ret != 0)
- return ret;
-
- drm_atomic_set_fb_for_plane(primary_state, NULL);
-
- goto commit;
- }
-
- WARN_ON(!set->fb);
- WARN_ON(!set->num_connectors);
-
- ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
- if (ret != 0)
- return ret;
-
- crtc_state->active = true;
-
- ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
- if (ret != 0)
- return ret;
-
- drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);
-
- drm_atomic_set_fb_for_plane(primary_state, set->fb);
- primary_state->crtc_x = 0;
- primary_state->crtc_y = 0;
- primary_state->crtc_w = hdisplay;
- primary_state->crtc_h = vdisplay;
- primary_state->src_x = set->x << 16;
- primary_state->src_y = set->y << 16;
- if (drm_rotation_90_or_270(primary_state->rotation)) {
- primary_state->src_w = vdisplay << 16;
- primary_state->src_h = hdisplay << 16;
- } else {
- primary_state->src_w = hdisplay << 16;
- primary_state->src_h = vdisplay << 16;
- }
-
-commit:
- ret = update_output_state(state, set);
- if (ret)
- return ret;
-
- return 0;
-}
-
/**
* drm_atomic_helper_disable_all - disable all currently active outputs
* @dev: DRM device
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 59ffb6b9c745..7d7347a6f194 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -24,12 +24,13 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_plane.h>
#include <drm/drm_connector.h>
-#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
#include <linux/slab.h>
@@ -57,6 +58,29 @@
*/
/**
+ * __drm_atomic_helper_crtc_reset - reset state on CRTC
+ * @crtc: drm CRTC
+ * @crtc_state: CRTC state to assign
+ *
+ * Initializes the newly allocated @crtc_state and assigns it to
+ * the &drm_crtc->state pointer of @crtc, usually required when
+ * initializing the drivers or when called from the &drm_crtc_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void
+__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ if (crtc_state)
+ crtc_state->crtc = crtc;
+
+ crtc->state = crtc_state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_reset);
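For a driver that subclasses the CRTC state, the reset hook then reduces to an allocate-and-hand-over; a hedged sketch (foo_crtc_state is hypothetical):

    struct foo_crtc_state {
            struct drm_crtc_state base;
            u32 custom_field;
    };

    static void foo_crtc_reset(struct drm_crtc *crtc)
    {
            struct foo_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

            if (crtc->state)
                    crtc->funcs->atomic_destroy_state(crtc, crtc->state);

            /* Handles a NULL allocation and links state->base back to @crtc */
            __drm_atomic_helper_crtc_reset(crtc, state ? &state->base : NULL);
    }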
+
+/**
* drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
* @crtc: drm CRTC
*
@@ -65,14 +89,13 @@
*/
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
- if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
-
- kfree(crtc->state);
- crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
+ struct drm_crtc_state *crtc_state =
+ kzalloc(sizeof(*crtc->state), GFP_KERNEL);
if (crtc->state)
- crtc->state->crtc = crtc;
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, crtc_state);
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
@@ -106,6 +129,10 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
state->commit = NULL;
state->event = NULL;
state->pageflip_flags = 0;
+
+ /* Self refresh should be canceled when a new update is available */
+ state->active = drm_atomic_crtc_effectively_active(state);
+ state->self_refresh_active = false;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
@@ -314,7 +341,7 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
* @conn_state: connector state to assign
*
* Initializes the newly allocated @conn_state and assigns it to
- * the &drm_conector->state pointer of @connector, usually required when
+ * the &drm_connector->state pointer of @connector, usually required when
* initializing the drivers or when called from the &drm_connector_funcs.reset
* hook.
*
@@ -369,6 +396,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
drm_connector_get(connector);
state->commit = NULL;
+ if (state->hdr_output_metadata)
+ drm_property_blob_get(state->hdr_output_metadata);
+
/* Don't copy over a writeback job, they are used only once */
state->writeback_job = NULL;
}
@@ -416,6 +446,8 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
if (state->writeback_job)
drm_writeback_cleanup_job(state->writeback_job);
+
+ drm_property_blob_put(state->hdr_output_metadata);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 428d82662dc4..abe38bdf85ae 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -490,7 +490,7 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_active)
- *val = state->active;
+ *val = drm_atomic_crtc_effectively_active(state);
else if (property == config->prop_mode_id)
*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
else if (property == config->prop_vrr_enabled)
@@ -676,6 +676,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *config = &dev->mode_config;
+ bool replaced = false;
+ int ret;
if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
@@ -726,13 +728,20 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
*/
if (state->link_status != DRM_LINK_STATUS_GOOD)
state->link_status = val;
+ } else if (property == config->hdr_output_metadata_property) {
+ ret = drm_atomic_replace_property_blob_from_id(dev,
+ &state->hdr_output_metadata,
+ val,
+ sizeof(struct hdr_output_metadata), -1,
+ &replaced);
+ return ret;
} else if (property == config->aspect_ratio_property) {
state->picture_aspect_ratio = val;
} else if (property == config->content_type_property) {
state->content_type = val;
} else if (property == connector->scaling_mode_property) {
state->scaling_mode = val;
- } else if (property == connector->content_protection_property) {
+ } else if (property == config->content_protection_property) {
if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
return -EINVAL;
@@ -779,7 +788,10 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->dpms_property) {
- *val = connector->dpms;
+ if (state->crtc && state->crtc->state->self_refresh_active)
+ *val = DRM_MODE_DPMS_ON;
+ else
+ *val = connector->dpms;
} else if (property == config->tv_select_subconnector_property) {
*val = state->tv.subconnector;
} else if (property == config->tv_left_margin_property) {
@@ -814,7 +826,10 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->colorspace;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
- } else if (property == connector->content_protection_property) {
+ } else if (property == config->hdr_output_metadata_property) {
+ *val = state->hdr_output_metadata ?
+ state->hdr_output_metadata->base.id : 0;
+ } else if (property == config->content_protection_property) {
*val = state->content_protection;
} else if (property == config->writeback_fb_id_property) {
/* Writeback framebuffer is one-shot, write and forget */
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 22c7a104b802..cc9acd986c68 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -28,10 +28,16 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
+#include <linux/slab.h>
+
+#include <drm/drm_auth.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_lease.h>
+#include <drm/drm_print.h>
+
#include "drm_internal.h"
#include "drm_legacy.h"
-#include <drm/drm_lease.h>
/**
* DOC: master and authentication
@@ -351,3 +357,23 @@ void drm_master_put(struct drm_master **master)
*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
+
+/* Used by drm_client and drm_fb_helper */
+bool drm_master_internal_acquire(struct drm_device *dev)
+{
+ mutex_lock(&dev->master_mutex);
+ if (dev->master) {
+ mutex_unlock(&dev->master_mutex);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(drm_master_internal_acquire);
+
+/* Used by drm_client and drm_fb_helper */
+void drm_master_internal_release(struct drm_device *dev)
+{
+ mutex_unlock(&dev->master_mutex);
+}
+EXPORT_SYMBOL(drm_master_internal_release);
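The intended pattern, used by drm_client_modeset_commit() added in this series, is acquire, do the internal modeset, release; as a fragment (dev, client and ret in scope):

    if (!drm_master_internal_acquire(dev))
            return -EBUSY;      /* userspace currently holds master */

    ret = drm_client_modeset_commit_force(client);

    drm_master_internal_release(dev);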
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 0c78ca386cbe..37ac168fcb60 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -23,13 +23,16 @@
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_blend.h>
+
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
#include "drm_crtc_internal.h"
/**
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 138b2711d389..cba537c99e43 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -352,6 +352,116 @@ void drm_bridge_enable(struct drm_bridge *bridge)
}
EXPORT_SYMBOL(drm_bridge_enable);
+/**
+ * drm_atomic_bridge_disable - disables all bridges in the encoder chain
+ * @bridge: bridge control structure
+ * @state: atomic state being committed
+ *
+ * Calls &drm_bridge_funcs.atomic_disable (falls back on
+ * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
+ * starting from the last bridge to the first. These are called before calling
+ * &drm_encoder_helper_funcs.atomic_disable.
+ *
+ * Note: the bridge passed should be the one closest to the encoder.
+ */
+void drm_atomic_bridge_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ if (!bridge)
+ return;
+
+ drm_atomic_bridge_disable(bridge->next, state);
+
+ if (bridge->funcs->atomic_disable)
+ bridge->funcs->atomic_disable(bridge, state);
+ else if (bridge->funcs->disable)
+ bridge->funcs->disable(bridge);
+}
+EXPORT_SYMBOL(drm_atomic_bridge_disable);
+
+/**
+ * drm_atomic_bridge_post_disable - cleans up after disabling all bridges in the
+ * encoder chain
+ * @bridge: bridge control structure
+ * @state: atomic state being committed
+ *
+ * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
+ * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
+ * starting from the first bridge to the last. These are called after completing
+ * &drm_encoder_helper_funcs.atomic_disable.
+ *
+ * Note: the bridge passed should be the one closest to the encoder.
+ */
+void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ if (!bridge)
+ return;
+
+ if (bridge->funcs->atomic_post_disable)
+ bridge->funcs->atomic_post_disable(bridge, state);
+ else if (bridge->funcs->post_disable)
+ bridge->funcs->post_disable(bridge);
+
+ drm_atomic_bridge_post_disable(bridge->next, state);
+}
+EXPORT_SYMBOL(drm_atomic_bridge_post_disable);
+
+/**
+ * drm_atomic_bridge_pre_enable - prepares for enabling all bridges in the
+ * encoder chain
+ * @bridge: bridge control structure
+ * @state: atomic state being committed
+ *
+ * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
+ * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
+ * starting from the last bridge to the first. These are called before calling
+ * &drm_encoder_helper_funcs.atomic_enable.
+ *
+ * Note: the bridge passed should be the one closest to the encoder.
+ */
+void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ if (!bridge)
+ return;
+
+ drm_atomic_bridge_pre_enable(bridge->next, state);
+
+ if (bridge->funcs->atomic_pre_enable)
+ bridge->funcs->atomic_pre_enable(bridge, state);
+ else if (bridge->funcs->pre_enable)
+ bridge->funcs->pre_enable(bridge);
+}
+EXPORT_SYMBOL(drm_atomic_bridge_pre_enable);
+
+/**
+ * drm_atomic_bridge_enable - enables all bridges in the encoder chain
+ * @bridge: bridge control structure
+ * @state: atomic state being committed
+ *
+ * Calls &drm_bridge_funcs.atomic_enable (falls back on
+ * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
+ * starting from the first bridge to the last. These are called after completing
+ * &drm_encoder_helper_funcs.atomic_enable.
+ *
+ * Note: the bridge passed should be the one closest to the encoder.
+ */
+void drm_atomic_bridge_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ if (!bridge)
+ return;
+
+ if (bridge->funcs->atomic_enable)
+ bridge->funcs->atomic_enable(bridge, state);
+ else if (bridge->funcs->enable)
+ bridge->funcs->enable(bridge);
+
+ drm_atomic_bridge_enable(bridge->next, state);
+}
+EXPORT_SYMBOL(drm_atomic_bridge_enable);
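On the implementation side, a bridge that needs the committed state can provide the atomic variants and leave the legacy ops unset; a sketch (the foo_ names and the prepare call are hypothetical):

    static void foo_bridge_atomic_pre_enable(struct drm_bridge *bridge,
                                             struct drm_atomic_state *old_state)
    {
            /* e.g. inspect the relevant CRTC state via @old_state if required */
            foo_bridge_prepare(bridge);
    }

    static const struct drm_bridge_funcs foo_bridge_funcs = {
            .atomic_pre_enable = foo_bridge_atomic_pre_enable,
            /* .pre_enable is only used as a fallback when this op is absent */
    };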
+
#ifdef CONFIG_OF
/**
* of_drm_find_bridge - find the bridge corresponding to the device node in
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bfc419ed9d6c..68dacf8422c6 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -28,15 +28,26 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/log2.h>
#include <linux/export.h>
+#include <linux/log2.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/nospec.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
#include <asm/shmparam.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_print.h>
+
#include "drm_legacy.h"
-#include <linux/nospec.h>
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index f20d1dda3961..410572f14257 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -15,10 +15,10 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
-#include <drm/drmP.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -27,7 +27,6 @@
* DOC: overview
*
* This library provides support for clients running in the kernel like fbdev and bootsplash.
- * Currently it's only partially implemented, just enough to support fbdev.
*
* GEM drivers which provide a GEM based dumb buffer with a virtual address are supported.
*/
@@ -92,14 +91,20 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
client->name = name;
client->funcs = funcs;
- ret = drm_client_open(client);
+ ret = drm_client_modeset_create(client);
if (ret)
goto err_put_module;
+ ret = drm_client_open(client);
+ if (ret)
+ goto err_free;
+
drm_dev_get(dev);
return 0;
+err_free:
+ drm_client_modeset_free(client);
err_put_module:
if (funcs)
module_put(funcs->owner);
@@ -148,6 +153,7 @@ void drm_client_release(struct drm_client_dev *client)
DRM_DEV_DEBUG_KMS(dev->dev, "%s\n", client->name);
+ drm_client_modeset_free(client);
drm_client_close(client);
drm_dev_put(dev);
if (client->funcs)
@@ -243,6 +249,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
static struct drm_client_buffer *
drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
{
+ const struct drm_format_info *info = drm_format_info(format);
struct drm_mode_create_dumb dumb_args = { };
struct drm_device *dev = client->dev;
struct drm_client_buffer *buffer;
@@ -258,7 +265,7 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
dumb_args.width = width;
dumb_args.height = height;
- dumb_args.bpp = drm_format_plane_cpp(format, 0) * 8;
+ dumb_args.bpp = info->cpp[0] * 8;
ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
if (ret)
goto err_delete;
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
new file mode 100644
index 000000000000..006bf7390e7d
--- /dev/null
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -0,0 +1,1087 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2018 Noralf Trønnes
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_client.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_print.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+#define DRM_CLIENT_MAX_CLONED_CONNECTORS 8
+
+struct drm_client_offset {
+ int x, y;
+};
+
+int drm_client_modeset_create(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+ unsigned int num_crtc = dev->mode_config.num_crtc;
+ unsigned int max_connector_count = 1;
+ struct drm_mode_set *modeset;
+ struct drm_crtc *crtc;
+ unsigned int i = 0;
+
+ /* Add terminating zero entry to enable index-less iteration */
+ client->modesets = kcalloc(num_crtc + 1, sizeof(*client->modesets), GFP_KERNEL);
+ if (!client->modesets)
+ return -ENOMEM;
+
+ mutex_init(&client->modeset_mutex);
+
+ drm_for_each_crtc(crtc, dev)
+ client->modesets[i++].crtc = crtc;
+
+ /* Cloning is only supported in the single crtc case. */
+ if (num_crtc == 1)
+ max_connector_count = DRM_CLIENT_MAX_CLONED_CONNECTORS;
+
+ for (modeset = client->modesets; modeset->crtc; modeset++) {
+ modeset->connectors = kcalloc(max_connector_count,
+ sizeof(*modeset->connectors), GFP_KERNEL);
+ if (!modeset->connectors)
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ drm_client_modeset_free(client);
+
+ return -ENOMEM;
+}
+
+static void drm_client_modeset_release(struct drm_client_dev *client)
+{
+ struct drm_mode_set *modeset;
+ unsigned int i;
+
+ drm_client_for_each_modeset(modeset, client) {
+ drm_mode_destroy(client->dev, modeset->mode);
+ modeset->mode = NULL;
+ modeset->fb = NULL;
+
+ for (i = 0; i < modeset->num_connectors; i++) {
+ drm_connector_put(modeset->connectors[i]);
+ modeset->connectors[i] = NULL;
+ }
+ modeset->num_connectors = 0;
+ }
+}
+
+void drm_client_modeset_free(struct drm_client_dev *client)
+{
+ struct drm_mode_set *modeset;
+
+ mutex_lock(&client->modeset_mutex);
+
+ drm_client_modeset_release(client);
+
+ drm_client_for_each_modeset(modeset, client)
+ kfree(modeset->connectors);
+
+ mutex_unlock(&client->modeset_mutex);
+
+ mutex_destroy(&client->modeset_mutex);
+ kfree(client->modesets);
+}
+
+static struct drm_mode_set *
+drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
+{
+ struct drm_mode_set *modeset;
+
+ drm_client_for_each_modeset(modeset, client)
+ if (modeset->crtc == crtc)
+ return modeset;
+
+ return NULL;
+}
+
+static struct drm_display_mode *
+drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (mode->hdisplay > width ||
+ mode->vdisplay > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+static struct drm_display_mode *
+drm_connector_pick_cmdline_mode(struct drm_connector *connector)
+{
+ struct drm_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode;
+ bool prefer_non_interlace;
+
+ cmdline_mode = &connector->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return NULL;
+
+ /* attempt to find a matching mode in the list of modes
+ * we have gotten so far; if none matches, add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ prefer_non_interlace = !cmdline_mode->interlace;
+again:
+ list_for_each_entry(mode, &connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ } else if (prefer_non_interlace) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ continue;
+ }
+ return mode;
+ }
+
+ if (prefer_non_interlace) {
+ prefer_non_interlace = false;
+ goto again;
+ }
+
+create_mode:
+ mode = drm_mode_create_from_cmdline_mode(connector->dev, cmdline_mode);
+ list_add(&mode->head, &connector->modes);
+
+ return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (connector->display_info.non_desktop)
+ return false;
+
+ if (strict)
+ enable = connector->status == connector_status_connected;
+ else
+ enable = connector->status != connector_status_disconnected;
+
+ return enable;
+}
+
+static void drm_client_connectors_enabled(struct drm_connector **connectors,
+ unsigned int connector_count,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < connector_count; i++) {
+ connector = connectors[i];
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no");
+
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < connector_count; i++)
+ enabled[i] = drm_connector_enabled(connectors[i], false);
+}
+
+static bool drm_client_target_cloned(struct drm_device *dev,
+ struct drm_connector **connectors,
+ unsigned int connector_count,
+ struct drm_display_mode **modes,
+ struct drm_client_offset *offsets,
+ bool *enabled, int width, int height)
+{
+ int count, i, j;
+ bool can_clone = false;
+ struct drm_display_mode *dmt_mode, *mode;
+
+ /* only contemplate cloning in the single crtc case */
+ if (dev->mode_config.num_crtc > 1)
+ return false;
+
+ count = 0;
+ for (i = 0; i < connector_count; i++) {
+ if (enabled[i])
+ count++;
+ }
+
+ /* only contemplate cloning if more than one connector is enabled */
+ if (count <= 1)
+ return false;
+
+ /* check the command line; if nothing is in common, pick 1024x768 */
+ can_clone = true;
+ for (i = 0; i < connector_count; i++) {
+ if (!enabled[i])
+ continue;
+ modes[i] = drm_connector_pick_cmdline_mode(connectors[i]);
+ if (!modes[i]) {
+ can_clone = false;
+ break;
+ }
+ for (j = 0; j < i; j++) {
+ if (!enabled[j])
+ continue;
+ if (!drm_mode_match(modes[j], modes[i],
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_CLOCK |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS))
+ can_clone = false;
+ }
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using command line\n");
+ return true;
+ }
+
+ /* try to find a 1024x768 mode on each connector */
+ can_clone = true;
+ dmt_mode = drm_mode_find_dmt(dev, 1024, 768, 60, false);
+
+ for (i = 0; i < connector_count; i++) {
+ if (!enabled[i])
+ continue;
+
+ list_for_each_entry(mode, &connectors[i]->modes, head) {
+ if (drm_mode_match(mode, dmt_mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_CLOCK |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS))
+ modes[i] = mode;
+ }
+ if (!modes[i])
+ can_clone = false;
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using 1024x768\n");
+ return true;
+ }
+ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ return false;
+}
+
+static int drm_client_get_tile_offsets(struct drm_connector **connectors,
+ unsigned int connector_count,
+ struct drm_display_mode **modes,
+ struct drm_client_offset *offsets,
+ int idx,
+ int h_idx, int v_idx)
+{
+ struct drm_connector *connector;
+ int i;
+ int hoffset = 0, voffset = 0;
+
+ for (i = 0; i < connector_count; i++) {
+ connector = connectors[i];
+ if (!connector->has_tile)
+ continue;
+
+ if (!modes[i] && (h_idx || v_idx)) {
+ DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
+ connector->base.id);
+ continue;
+ }
+ if (connector->tile_h_loc < h_idx)
+ hoffset += modes[i]->hdisplay;
+
+ if (connector->tile_v_loc < v_idx)
+ voffset += modes[i]->vdisplay;
+ }
+ offsets[idx].x = hoffset;
+ offsets[idx].y = voffset;
+ DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
+ return 0;
+}
+
+static bool drm_client_target_preferred(struct drm_connector **connectors,
+ unsigned int connector_count,
+ struct drm_display_mode **modes,
+ struct drm_client_offset *offsets,
+ bool *enabled, int width, int height)
+{
+ const u64 mask = BIT_ULL(connector_count) - 1;
+ struct drm_connector *connector;
+ u64 conn_configured = 0;
+ int tile_pass = 0;
+ int i;
+
+retry:
+ for (i = 0; i < connector_count; i++) {
+ connector = connectors[i];
+
+ if (conn_configured & BIT_ULL(i))
+ continue;
+
+ if (enabled[i] == false) {
+ conn_configured |= BIT_ULL(i);
+ continue;
+ }
+
+ /* first pass over all the untiled connectors */
+ if (tile_pass == 0 && connector->has_tile)
+ continue;
+
+ if (tile_pass == 1) {
+ if (connector->tile_h_loc != 0 ||
+ connector->tile_v_loc != 0)
+ continue;
+
+ } else {
+ if (connector->tile_h_loc != tile_pass - 1 &&
+ connector->tile_v_loc != tile_pass - 1)
+ /* if this tile_pass doesn't cover any of the tiles, keep going */
+ continue;
+
+ /*
+ * find the tile offsets for this pass - need to find
+ * all tiles left and above
+ */
+ drm_client_get_tile_offsets(connectors, connector_count, modes, offsets, i,
+ connector->tile_h_loc, connector->tile_v_loc);
+ }
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ connector->base.id);
+
+ /* go for command line mode first */
+ modes[i] = drm_connector_pick_cmdline_mode(connector);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
+ connector->base.id, connector->tile_group ? connector->tile_group->id : 0);
+ modes[i] = drm_connector_has_preferred_mode(connector, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&connector->modes)) {
+ list_for_each_entry(modes[i], &connector->modes, head)
+ break;
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ conn_configured |= BIT_ULL(i);
+ }
+
+ if ((conn_configured & mask) != mask) {
+ tile_pass++;
+ goto retry;
+ }
+ return true;
+}
+
+static bool connector_has_possible_crtc(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ int i;
+
+ drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ if (encoder->possible_crtcs & drm_crtc_mask(crtc))
+ return true;
+ }
+
+ return false;
+}
+
+static int drm_client_pick_crtcs(struct drm_client_dev *client,
+ struct drm_connector **connectors,
+ unsigned int connector_count,
+ struct drm_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_connector *connector;
+ int my_score, best_score, score;
+ struct drm_crtc **crtcs, *crtc;
+ struct drm_mode_set *modeset;
+ int o;
+
+ if (n == connector_count)
+ return 0;
+
+ connector = connectors[n];
+
+ best_crtcs[n] = NULL;
+ best_score = drm_client_pick_crtcs(client, connectors, connector_count,
+ best_crtcs, modes, n + 1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = kcalloc(connector_count, sizeof(*crtcs), GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (connector->cmdline_mode.specified)
+ my_score++;
+ if (drm_connector_has_preferred_mode(connector, width, height))
+ my_score++;
+
+ /*
+ * select a crtc for this connector and then attempt to configure
+ * remaining connectors
+ */
+ drm_client_for_each_modeset(modeset, client) {
+ crtc = modeset->crtc;
+
+ if (!connector_has_possible_crtc(connector, crtc))
+ continue;
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning unless only a single crtc */
+ if (dev->mode_config.num_crtc > 1)
+ continue;
+
+ if (!drm_mode_equal(modes[o], modes[n]))
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ memcpy(crtcs, best_crtcs, n * sizeof(*crtcs));
+ score = my_score + drm_client_pick_crtcs(client, connectors, connector_count,
+ crtcs, modes, n + 1, width, height);
+ if (score > best_score) {
+ best_score = score;
+ memcpy(best_crtcs, crtcs, connector_count * sizeof(*crtcs));
+ }
+ }
+
+ kfree(crtcs);
+ return best_score;
+}
+
+/* Try to read the BIOS display configuration and use it for the initial config */
+static bool drm_client_firmware_config(struct drm_client_dev *client,
+ struct drm_connector **connectors,
+ unsigned int connector_count,
+ struct drm_crtc **crtcs,
+ struct drm_display_mode **modes,
+ struct drm_client_offset *offsets,
+ bool *enabled, int width, int height)
+{
+ unsigned int count = min_t(unsigned int, connector_count, BITS_PER_LONG);
+ unsigned long conn_configured, conn_seq, mask;
+ struct drm_device *dev = client->dev;
+ int i, j;
+ bool *save_enabled;
+ bool fallback = true, ret = true;
+ int num_connectors_enabled = 0;
+ int num_connectors_detected = 0;
+ struct drm_modeset_acquire_ctx ctx;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ return false;
+
+ save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
+ if (!save_enabled)
+ return false;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ while (drm_modeset_lock_all_ctx(dev, &ctx) != 0)
+ drm_modeset_backoff(&ctx);
+
+ memcpy(save_enabled, enabled, count);
+ mask = GENMASK(count - 1, 0);
+ conn_configured = 0;
+retry:
+ conn_seq = conn_configured;
+ for (i = 0; i < count; i++) {
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_crtc *new_crtc;
+
+ connector = connectors[i];
+
+ if (conn_configured & BIT(i))
+ continue;
+
+ if (conn_seq == 0 && !connector->has_tile)
+ continue;
+
+ if (connector->status == connector_status_connected)
+ num_connectors_detected++;
+
+ if (!enabled[i]) {
+ DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
+ connector->name);
+ conn_configured |= BIT(i);
+ continue;
+ }
+
+ if (connector->force == DRM_FORCE_OFF) {
+ DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
+ connector->name);
+ enabled[i] = false;
+ continue;
+ }
+
+ encoder = connector->state->best_encoder;
+ if (!encoder || WARN_ON(!connector->state->crtc)) {
+ if (connector->force > DRM_FORCE_OFF)
+ goto bail;
+
+ DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
+ connector->name);
+ enabled[i] = false;
+ conn_configured |= BIT(i);
+ continue;
+ }
+
+ num_connectors_enabled++;
+
+ new_crtc = connector->state->crtc;
+
+ /*
+ * Make sure we're not trying to drive multiple connectors
+ * with a single CRTC, since our cloning support may not
+ * match the BIOS.
+ */
+ for (j = 0; j < count; j++) {
+ if (crtcs[j] == new_crtc) {
+ DRM_DEBUG_KMS("fallback: cloned configuration\n");
+ goto bail;
+ }
+ }
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
+ connector->name);
+
+ /* go for command line mode first */
+ modes[i] = drm_connector_pick_cmdline_mode(connector);
+
+ /* try for preferred next */
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
+ connector->name, connector->has_tile);
+ modes[i] = drm_connector_has_preferred_mode(connector, width, height);
+ }
+
+ /* No preferred mode marked by the EDID? Are there any modes? */
+ if (!modes[i] && !list_empty(&connector->modes)) {
+ DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+ connector->name);
+ modes[i] = list_first_entry(&connector->modes,
+ struct drm_display_mode,
+ head);
+ }
+
+ /* last resort: use current mode */
+ if (!modes[i]) {
+ /*
+ * IMPORTANT: We want to use the adjusted mode (i.e.
+ * after the panel fitter upscaling) as the initial
+ * config, not the input mode, which is what crtc->mode
+ * usually contains. But since our current
+ * code puts a mode derived from the post-pfit timings
+ * into crtc->mode this works out correctly.
+ *
+ * This is crtc->mode and not crtc->state->mode for the
+ * fastboot check to work correctly.
+ */
+ DRM_DEBUG_KMS("looking for current mode on connector %s\n",
+ connector->name);
+ modes[i] = &connector->state->crtc->mode;
+ }
+ crtcs[i] = new_crtc;
+
+ DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
+ connector->name,
+ connector->state->crtc->base.id,
+ connector->state->crtc->name,
+ modes[i]->hdisplay, modes[i]->vdisplay,
+ modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+
+ fallback = false;
+ conn_configured |= BIT(i);
+ }
+
+ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ goto retry;
+
+ /*
+ * If the BIOS didn't enable everything it could, fall back to the fbdev
+ * helper library's usual behaviour of lighting up as much as possible.
+ */
+ if (num_connectors_enabled != num_connectors_detected &&
+ num_connectors_enabled < dev->mode_config.num_crtc) {
+ DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
+ DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
+ num_connectors_detected);
+ fallback = true;
+ }
+
+ if (fallback) {
+bail:
+ DRM_DEBUG_KMS("Not using firmware configuration\n");
+ memcpy(enabled, save_enabled, count);
+ ret = false;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ kfree(save_enabled);
+ return ret;
+}
+
+/**
+ * drm_client_modeset_probe() - Probe for displays
+ * @client: DRM client
+ * @width: Maximum display mode width (optional)
+ * @height: Maximum display mode height (optional)
+ *
+ * This function sets up display pipelines for enabled connectors and stores the
+ * config in the client's modeset array.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height)
+{
+ struct drm_connector *connector, **connectors = NULL;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_device *dev = client->dev;
+ unsigned int total_modes_count = 0;
+ struct drm_client_offset *offsets;
+ unsigned int connector_count = 0;
+ struct drm_display_mode **modes;
+ struct drm_crtc **crtcs;
+ int i, ret = 0;
+ bool *enabled;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!width)
+ width = dev->mode_config.max_width;
+ if (!height)
+ height = dev->mode_config.max_height;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_client_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_connector **tmp;
+
+ tmp = krealloc(connectors, (connector_count + 1) * sizeof(*connectors), GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto free_connectors;
+ }
+
+ connectors = tmp;
+ drm_connector_get(connector);
+ connectors[connector_count++] = connector;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!connector_count)
+ return 0;
+
+ crtcs = kcalloc(connector_count, sizeof(*crtcs), GFP_KERNEL);
+ modes = kcalloc(connector_count, sizeof(*modes), GFP_KERNEL);
+ offsets = kcalloc(connector_count, sizeof(*offsets), GFP_KERNEL);
+ enabled = kcalloc(connector_count, sizeof(bool), GFP_KERNEL);
+ if (!crtcs || !modes || !enabled || !offsets) {
+ DRM_ERROR("Memory allocation failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mutex_lock(&client->modeset_mutex);
+
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < connector_count; i++)
+ total_modes_count += connectors[i]->funcs->fill_modes(connectors[i], width, height);
+ if (!total_modes_count)
+ DRM_DEBUG_KMS("No connectors reported connected with modes\n");
+ drm_client_connectors_enabled(connectors, connector_count, enabled);
+
+ if (!drm_client_firmware_config(client, connectors, connector_count, crtcs,
+ modes, offsets, enabled, width, height)) {
+ memset(modes, 0, connector_count * sizeof(*modes));
+ memset(crtcs, 0, connector_count * sizeof(*crtcs));
+ memset(offsets, 0, connector_count * sizeof(*offsets));
+
+ if (!drm_client_target_cloned(dev, connectors, connector_count, modes,
+ offsets, enabled, width, height) &&
+ !drm_client_target_preferred(connectors, connector_count, modes,
+ offsets, enabled, width, height))
+ DRM_ERROR("Unable to find initial modes\n");
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
+ width, height);
+
+ drm_client_pick_crtcs(client, connectors, connector_count,
+ crtcs, modes, 0, width, height);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ drm_client_modeset_release(client);
+
+ for (i = 0; i < connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_crtc *crtc = crtcs[i];
+ struct drm_client_offset *offset = &offsets[i];
+
+ if (mode && crtc) {
+ struct drm_mode_set *modeset = drm_client_find_modeset(client, crtc);
+ struct drm_connector *connector = connectors[i];
+
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
+ mode->name, crtc->base.id, offset->x, offset->y);
+
+ if (WARN_ON_ONCE(modeset->num_connectors == DRM_CLIENT_MAX_CLONED_CONNECTORS ||
+ (dev->mode_config.num_crtc > 1 && modeset->num_connectors == 1))) {
+ ret = -EINVAL;
+ break;
+ }
+
+ modeset->mode = drm_mode_duplicate(dev, mode);
+ drm_connector_get(connector);
+ modeset->connectors[modeset->num_connectors++] = connector;
+ modeset->x = offset->x;
+ modeset->y = offset->y;
+ }
+ }
+
+ mutex_unlock(&client->modeset_mutex);
+out:
+ kfree(crtcs);
+ kfree(modes);
+ kfree(offsets);
+ kfree(enabled);
+free_connectors:
+ for (i = 0; i < connector_count; i++)
+ drm_connector_put(connectors[i]);
+ kfree(connectors);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_client_modeset_probe);
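Together with the commit helpers below, a client's display setup reduces to a probe-then-commit pair; a sketch (passing zero for width/height falls back to the mode_config maximums):

    static int foo_client_setup(struct drm_client_dev *client)
    {
            int ret;

            /* Fill client->modesets with a crtc/mode per enabled connector */
            ret = drm_client_modeset_probe(client, 0, 0);
            if (ret)
                    return ret;

            /* Program the hardware; atomic vs. legacy is picked internally */
            return drm_client_modeset_commit(client);
    }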
+
+/**
+ * drm_client_panel_rotation() - Check panel orientation
+ * @modeset: DRM modeset
+ * @rotation: Returned rotation value
+ *
+ * This function checks if the primary plane in @modeset can rotate in hardware
+ * to match the panel orientation on its connector.
+ *
+ * Note: Currently only 0 and 180 degrees are supported.
+ *
+ * Return:
+ * True if the plane can do the rotation, false otherwise.
+ */
+bool drm_client_panel_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
+{
+ struct drm_connector *connector = modeset->connectors[0];
+ struct drm_plane *plane = modeset->crtc->primary;
+ u64 valid_mask = 0;
+ unsigned int i;
+
+ if (!modeset->num_connectors)
+ return false;
+
+ switch (connector->display_info.panel_orientation) {
+ case DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP:
+ *rotation = DRM_MODE_ROTATE_180;
+ break;
+ case DRM_MODE_PANEL_ORIENTATION_LEFT_UP:
+ *rotation = DRM_MODE_ROTATE_90;
+ break;
+ case DRM_MODE_PANEL_ORIENTATION_RIGHT_UP:
+ *rotation = DRM_MODE_ROTATE_270;
+ break;
+ default:
+ *rotation = DRM_MODE_ROTATE_0;
+ }
+
+ /*
+ * TODO: support 90 / 270 degree hardware rotation,
+ * depending on the hardware this may require the framebuffer
+ * to be in a specific tiling format.
+ */
+ if (*rotation != DRM_MODE_ROTATE_180 || !plane->rotation_property)
+ return false;
+
+ for (i = 0; i < plane->rotation_property->num_values; i++)
+ valid_mask |= (1ULL << plane->rotation_property->values[i]);
+
+ if (!(*rotation & valid_mask))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(drm_client_panel_rotation);
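This is exactly how the atomic commit path below consumes it; as a standalone fragment (assuming a prepared @state whose plane states were already acquired):

    unsigned int rotation;

    if (drm_client_panel_rotation(mode_set, &rotation)) {
            struct drm_plane_state *plane_state =
                    drm_atomic_get_new_plane_state(state, mode_set->crtc->primary);

            plane_state->rotation = rotation;
    }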
+
+static int drm_client_modeset_commit_atomic(struct drm_client_dev *client, bool active)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_mode_set *mode_set;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_ctx;
+ }
+
+ state->acquire_ctx = &ctx;
+retry:
+ drm_for_each_plane(plane, dev) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto out_state;
+ }
+
+ plane_state->rotation = DRM_MODE_ROTATE_0;
+
+ /* disable non-primary: */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
+ ret = __drm_atomic_helper_disable_plane(plane, plane_state);
+ if (ret != 0)
+ goto out_state;
+ }
+
+ drm_client_for_each_modeset(mode_set, client) {
+ struct drm_plane *primary = mode_set->crtc->primary;
+ unsigned int rotation;
+
+ if (drm_client_panel_rotation(mode_set, &rotation)) {
+ /* Cannot fail as we've already gotten the plane state above */
+ plane_state = drm_atomic_get_new_plane_state(state, primary);
+ plane_state->rotation = rotation;
+ }
+
+ ret = __drm_atomic_helper_set_config(mode_set, state);
+ if (ret != 0)
+ goto out_state;
+
+ /*
+ * __drm_atomic_helper_set_config() sets active when a
+ * mode is set, unconditionally clear it if we force DPMS off
+ */
+ if (!active) {
+ struct drm_crtc *crtc = mode_set->crtc;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->active = false;
+ }
+ }
+
+ ret = drm_atomic_commit(state);
+
+out_state:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_put(state);
+out_ctx:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+backoff:
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+
+ goto retry;
+}
+
+static int drm_client_modeset_commit_legacy(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_mode_set *mode_set;
+ struct drm_plane *plane;
+ int ret = 0;
+
+ drm_modeset_lock_all(dev);
+ drm_for_each_plane(plane, dev) {
+ if (plane->type != DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_force_disable(plane);
+
+ if (plane->rotation_property)
+ drm_mode_plane_set_obj_prop(plane,
+ plane->rotation_property,
+ DRM_MODE_ROTATE_0);
+ }
+
+ drm_client_for_each_modeset(mode_set, client) {
+ struct drm_crtc *crtc = mode_set->crtc;
+
+ if (crtc->funcs->cursor_set2) {
+ ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+ if (ret)
+ goto out;
+ } else if (crtc->funcs->cursor_set) {
+ ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
+ if (ret)
+ goto out;
+ }
+
+ ret = drm_mode_set_config_internal(mode_set);
+ if (ret)
+ goto out;
+ }
+out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+/**
+ * drm_client_modeset_commit_force() - Force commit CRTC configuration
+ * @client: DRM client
+ *
+ * Commit modeset configuration to crtcs without checking if there is a DRM master.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_client_modeset_commit_force(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ mutex_lock(&client->modeset_mutex);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_client_modeset_commit_atomic(client, true);
+ else
+ ret = drm_client_modeset_commit_legacy(client);
+ mutex_unlock(&client->modeset_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_client_modeset_commit_force);
+
+/**
+ * drm_client_modeset_commit() - Commit CRTC configuration
+ * @client: DRM client
+ *
+ * Commit modeset configuration to crtcs.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_client_modeset_commit(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ if (!drm_master_internal_acquire(dev))
+ return -EBUSY;
+
+ ret = drm_client_modeset_commit_force(client);
+
+ drm_master_internal_release(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_client_modeset_commit);
+
+static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dpms_mode)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_connector *connector;
+ struct drm_mode_set *modeset;
+ int j;
+
+ drm_modeset_lock_all(dev);
+ drm_client_for_each_modeset(modeset, client) {
+ if (!modeset->crtc->enabled)
+ continue;
+
+ for (j = 0; j < modeset->num_connectors; j++) {
+ connector = modeset->connectors[j];
+ connector->funcs->dpms(connector, dpms_mode);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.dpms_property, dpms_mode);
+ }
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+/**
+ * drm_client_modeset_dpms() - Set DPMS mode
+ * @client: DRM client
+ * @mode: DPMS mode
+ *
+ * Note: For atomic drivers @mode is reduced to on/off.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_client_modeset_dpms(struct drm_client_dev *client, int mode)
+{
+ struct drm_device *dev = client->dev;
+ int ret = 0;
+
+ if (!drm_master_internal_acquire(dev))
+ return -EBUSY;
+
+ mutex_lock(&client->modeset_mutex);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_client_modeset_commit_atomic(client, mode == DRM_MODE_DPMS_ON);
+ else
+ drm_client_modeset_dpms_legacy(client, mode);
+ mutex_unlock(&client->modeset_mutex);
+
+ drm_master_internal_release(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_client_modeset_dpms);
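An fbdev-style client would route its blanking through this helper; a sketch (the foo_ name is hypothetical):

    static int foo_client_blank(struct drm_client_dev *client, bool blank)
    {
            return drm_client_modeset_dpms(client,
                                           blank ? DRM_MODE_DPMS_OFF
                                                 : DRM_MODE_DPMS_ON);
    }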
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index d5d34d0c79c7..4ce5c6d8de99 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -20,9 +20,13 @@
* OF THIS SOFTWARE.
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
+#include <linux/uaccess.h>
+
#include <drm/drm_color_mgmt.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index b34c3d38bf15..3ccdcf3dfcde 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -464,10 +464,7 @@ int drm_connector_register(struct drm_connector *connector)
if (ret)
goto unlock;
- ret = drm_debugfs_connector_add(connector);
- if (ret) {
- goto err_sysfs;
- }
+ drm_debugfs_connector_add(connector);
if (connector->funcs->late_register) {
ret = connector->funcs->late_register(connector);
@@ -482,7 +479,6 @@ int drm_connector_register(struct drm_connector *connector)
err_debugfs:
drm_debugfs_connector_remove(connector);
-err_sysfs:
drm_sysfs_connector_remove(connector);
unlock:
mutex_unlock(&connector->mutex);
@@ -823,13 +819,6 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
-static struct drm_prop_enum_list drm_cp_enum_list[] = {
- { DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
- { DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
- { DRM_MODE_CONTENT_PROTECTION_ENABLED, "Enabled" },
-};
-DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
-
static const struct drm_prop_enum_list hdmi_colorspaces[] = {
/* For Default case, driver will set the colorspace */
{ DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
@@ -963,6 +952,47 @@ static const struct drm_prop_enum_list hdmi_colorspaces[] = {
* is no longer protected and userspace should take appropriate action
* (whatever that might be).
*
+ * HDR_OUTPUT_METADATA:
+ * Connector property that enables userspace to send HDR metadata to the
+ * driver. This metadata is based on the composition and blending
+ * policies decided by the user, taking into account the hardware and
+ * sink capabilities. The driver gets this metadata and creates a
+ * Dynamic Range and Mastering Infoframe (DRM) in case of HDMI, or an
+ * SDP packet (Non-audio INFOFRAME SDP v1.3) in case of DP. This is then
+ * sent to the sink, notifying it of the upcoming frame's color encoding
+ * and luminance parameters.
+ *
+ * Userspace first needs to detect the HDR capabilities of the sink by
+ * reading and parsing the EDID. Details of the HDR metadata for HDMI
+ * are specified in the CTA 861.G spec; for DP they are defined in the
+ * VESA DP Standard v1.4. Userspace then needs to get the metadata of
+ * the video/game/app content that is encoded in HDR (basically using
+ * HDR transfer functions). With this information it needs to decide on
+ * a blending policy and compose the relevant layers/overlays into a
+ * common format. Once this blending is done, userspace knows the
+ * metadata of the composed frame to be sent to the sink. It then uses
+ * this property to communicate that metadata to the driver, which
+ * builds an Infoframe packet and sends it to the sink based on the type
+ * of encoder connected.
+ *
+ * Userspace is responsible for tone mapping in case:
+ * - Some layers are HDR and others are SDR
+ * - The luminance of the HDR layers differs from that of the sink
+ *
+ * It will also need to do colorspace conversion and bring all layers to
+ * one common colorspace for blending. It can use either GL, media or
+ * display engines to get this done, based on the capabilities of the
+ * associated hardware.
+ *
+ * The driver expects the metadata from userspace in a &struct
+ * hdr_output_metadata. This is received as a blob and stored in
+ * &drm_connector_state.hdr_output_metadata. The driver parses the EDID
+ * and saves the sink metadata in &struct hdr_sink_metadata, as
+ * &drm_connector.hdr_sink_metadata. In case of an HDMI encoder, the
+ * driver uses the drm_hdmi_infoframe_set_hdr_metadata() helper to set
+ * the HDR metadata and hdmi_drm_infoframe_pack() to pack the infoframe
+ * as per spec.
* max bpc:
* This range property is used by userspace to limit the bit depth. When
* used the driver would limit the bpc in accordance with the valid range
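For illustration, a minimal userspace sketch of setting the new property, assuming libdrm's atomic API (drmModeCreatePropertyBlob(), drmModeAtomicAddProperty()) and a uapi drm_mode.h recent enough to provide struct hdr_output_metadata; the connector/property ids, EOTF value and luminance numbers are illustrative, and most error handling is elided.

/*
 * Illustrative userspace sketch (not part of this patch): attach HDR
 * metadata to a connector through the HDR_OUTPUT_METADATA blob
 * property. Assumes an open DRM fd and ids looked up beforehand with
 * drmModeObjectGetProperties().
 */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_hdr_metadata(int fd, drmModeAtomicReq *req,
			    uint32_t connector_id, uint32_t prop_id)
{
	struct hdr_output_metadata meta = {
		.metadata_type = 0,		/* Static Metadata Type 1 */
		.hdmi_metadata_type1 = {
			.eotf = 2,		/* SMPTE ST 2084 (PQ) per CTA-861-G */
			.metadata_type = 0,
			.max_cll = 1000,	/* illustrative nits */
			.max_fall = 400,
		},
	};
	uint32_t blob_id;
	int ret;

	ret = drmModeCreatePropertyBlob(fd, &meta, sizeof(meta), &blob_id);
	if (ret)
		return ret;

	ret = drmModeAtomicAddProperty(req, connector_id, prop_id, blob_id);
	return ret < 0 ? ret : 0;
}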
@@ -1058,6 +1088,12 @@ int drm_connector_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.non_desktop_property = prop;
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB,
+ "HDR_OUTPUT_METADATA", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.hdr_output_metadata_property = prop;
+
return 0;
}
@@ -1510,42 +1546,6 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_connector_attach_scaling_mode_property);
/**
- * drm_connector_attach_content_protection_property - attach content protection
- * property
- *
- * @connector: connector to attach CP property on.
- *
- * This is used to add support for content protection on select connectors.
- * Content Protection is intentionally vague to allow for different underlying
- * technologies, however it is most implemented by HDCP.
- *
- * The content protection will be set to &drm_connector_state.content_protection
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_connector_attach_content_protection_property(
- struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_property *prop;
-
- prop = drm_property_create_enum(dev, 0, "Content Protection",
- drm_cp_enum_list,
- ARRAY_SIZE(drm_cp_enum_list));
- if (!prop)
- return -ENOMEM;
-
- drm_object_attach_property(&connector->base, prop,
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
-
- connector->content_protection_property = prop;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
-
-/**
* drm_mode_create_aspect_ratio_property - create aspect ratio property
* @dev: DRM device
*
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 6e8e1a9fcae3..1f802d8e5681 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -28,7 +28,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "drm_legacy.h"
struct drm_ctx_list {
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 790ba5941954..4936e1080e41 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -122,9 +122,7 @@ int drm_crtc_register_all(struct drm_device *dev)
int ret = 0;
drm_for_each_crtc(crtc, dev) {
- if (drm_debugfs_crtc_add(crtc))
- DRM_ERROR("Failed to initialize debugfs entry for CRTC '%s'.\n",
- crtc->name);
+ drm_debugfs_crtc_add(crtc);
if (crtc->funcs->late_register)
ret = crtc->funcs->late_register(crtc);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 747661f63fbb..6dd49a60deac 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -29,21 +29,23 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
-#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/kernel.h>
#include <linux/moduleparam.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
/**
* DOC: overview
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 0719a235d6cc..c7d5e4c21423 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -31,14 +31,32 @@
* and are not exported to drivers.
*/
-enum drm_mode_status;
+#include <linux/types.h>
+
+enum drm_color_encoding;
+enum drm_color_range;
enum drm_connector_force;
+enum drm_mode_status;
-struct drm_display_mode;
-struct work_struct;
-struct drm_connector;
+struct drm_atomic_state;
struct drm_bridge;
+struct drm_connector;
+struct drm_crtc;
+struct drm_device;
+struct drm_display_mode;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_mode_create_dumb;
+struct drm_mode_fb_cmd2;
+struct drm_mode_fb_cmd;
+struct drm_mode_object;
+struct drm_mode_set;
+struct drm_plane;
+struct drm_plane_state;
+struct drm_property;
struct edid;
+struct kref;
+struct work_struct;
/* drm_crtc.c */
int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
@@ -207,6 +225,11 @@ struct drm_minor;
int drm_atomic_debugfs_init(struct drm_minor *minor);
#endif
+int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
+int __drm_atomic_helper_set_config(struct drm_mode_set *set,
+ struct drm_atomic_state *state);
+
void drm_atomic_print_state(const struct drm_atomic_state *state);
/* drm_atomic_uapi.c */
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index ee67c96841fa..8230dac01a89 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -286,7 +286,7 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
- iter->clips = 0;
+ iter->clips = NULL;
iter->num_clips = 0;
iter->full_update = true;
}
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index f8468eae0503..63b9951bb8f3 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -24,20 +24,23 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/export.h>
+#include <linux/uaccess.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_auth.h>
#include <drm/drm_client.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_auth.h>
+#include <drm/drm_file.h>
#include <drm/drm_gem.h>
-#include <drm/drmP.h>
-#include "drm_internal.h"
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
#if defined(CONFIG_DEBUG_FS)
@@ -226,10 +229,6 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
mutex_init(&minor->debugfs_lock);
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
- if (!minor->debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
- return -1;
- }
ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
@@ -310,17 +309,15 @@ static void drm_debugfs_remove_all_files(struct drm_minor *minor)
mutex_unlock(&minor->debugfs_lock);
}
-int drm_debugfs_cleanup(struct drm_minor *minor)
+void drm_debugfs_cleanup(struct drm_minor *minor)
{
if (!minor->debugfs_root)
- return 0;
+ return;
drm_debugfs_remove_all_files(minor);
debugfs_remove_recursive(minor->debugfs_root);
minor->debugfs_root = NULL;
-
- return 0;
}
static int connector_show(struct seq_file *m, void *data)
@@ -438,38 +435,24 @@ static const struct file_operations drm_connector_fops = {
.write = connector_write
};
-int drm_debugfs_connector_add(struct drm_connector *connector)
+void drm_debugfs_connector_add(struct drm_connector *connector)
{
struct drm_minor *minor = connector->dev->primary;
- struct dentry *root, *ent;
+ struct dentry *root;
if (!minor->debugfs_root)
- return -1;
+ return;
root = debugfs_create_dir(connector->name, minor->debugfs_root);
- if (!root)
- return -ENOMEM;
-
connector->debugfs_entry = root;
/* force */
- ent = debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector,
- &drm_connector_fops);
- if (!ent)
- goto error;
+ debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector,
+ &drm_connector_fops);
/* edid */
- ent = debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root,
- connector, &drm_edid_fops);
- if (!ent)
- goto error;
-
- return 0;
-
-error:
- debugfs_remove_recursive(connector->debugfs_entry);
- connector->debugfs_entry = NULL;
- return -ENOMEM;
+ debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root, connector,
+ &drm_edid_fops);
}
void drm_debugfs_connector_remove(struct drm_connector *connector)
@@ -482,7 +465,7 @@ void drm_debugfs_connector_remove(struct drm_connector *connector)
connector->debugfs_entry = NULL;
}
-int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+void drm_debugfs_crtc_add(struct drm_crtc *crtc)
{
struct drm_minor *minor = crtc->dev->primary;
struct dentry *root;
@@ -490,23 +473,14 @@ int drm_debugfs_crtc_add(struct drm_crtc *crtc)
name = kasprintf(GFP_KERNEL, "crtc-%d", crtc->index);
if (!name)
- return -ENOMEM;
+ return;
root = debugfs_create_dir(name, minor->debugfs_root);
kfree(name);
- if (!root)
- return -ENOMEM;
crtc->debugfs_entry = root;
- if (drm_debugfs_crtc_crc_add(crtc))
- goto error;
-
- return 0;
-
-error:
- drm_debugfs_crtc_remove(crtc);
- return -ENOMEM;
+ drm_debugfs_crtc_crc_add(crtc);
}
void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 00e743153e94..7ca486d750e9 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -29,7 +29,14 @@
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
-#include <drm/drmP.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs_crc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+
#include "drm_internal.h"
/**
@@ -344,33 +351,19 @@ static const struct file_operations drm_crtc_crc_data_fops = {
.release = crtc_crc_release,
};
-int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
- struct dentry *crc_ent, *ent;
+ struct dentry *crc_ent;
if (!crtc->funcs->set_crc_source || !crtc->funcs->verify_crc_source)
- return 0;
+ return;
crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
- if (!crc_ent)
- return -ENOMEM;
-
- ent = debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
- &drm_crtc_crc_control_fops);
- if (!ent)
- goto error;
-
- ent = debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
- &drm_crtc_crc_data_fops);
- if (!ent)
- goto error;
-
- return 0;
-
-error:
- debugfs_remove_recursive(crc_ent);
- return -ENOMEM;
+ debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
+ &drm_crtc_crc_control_fops);
+ debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
+ &drm_crtc_crc_data_fops);
}
/**
@@ -389,12 +382,13 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
struct drm_crtc_crc *crc = &crtc->crc;
struct drm_crtc_crc_entry *entry;
int head, tail;
+ unsigned long flags;
- spin_lock(&crc->lock);
+ spin_lock_irqsave(&crc->lock, flags);
/* Caller may not have noticed yet that userspace has stopped reading */
if (!crc->entries) {
- spin_unlock(&crc->lock);
+ spin_unlock_irqrestore(&crc->lock, flags);
return -EINVAL;
}
@@ -405,7 +399,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
bool was_overflow = crc->overflow;
crc->overflow = true;
- spin_unlock(&crc->lock);
+ spin_unlock_irqrestore(&crc->lock, flags);
if (!was_overflow)
DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
@@ -421,7 +415,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
crc->head = head;
- spin_unlock(&crc->lock);
+ spin_unlock_irqrestore(&crc->lock, flags);
wake_up_interruptible(&crc->wq);
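Why the irqsave variant: drivers commonly feed CRC entries from their vblank/CRC interrupt handler while the debugfs read side takes the same lock from process context. A sketch of such an interrupt-side caller follows; the handler name and register read are hypothetical, and <linux/interrupt.h> is assumed.

/*
 * Illustrative sketch (not part of this patch): the interrupt-context
 * caller that motivates spin_lock_irqsave() above.
 */
static irqreturn_t example_crc_irq(int irq, void *arg)
{
	struct drm_crtc *crtc = arg;
	uint32_t crcs[3] = { 0 };	/* filled from hardware registers */

	drm_crtc_add_crc_entry(crtc, true,
			       drm_crtc_accurate_vblank_count(crtc), crcs);
	return IRQ_HANDLED;
}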
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 3f83e2ca80ad..5ef0227eaa0e 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -34,7 +34,11 @@
*/
#include <linux/export.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_print.h>
+
#include "drm_legacy.h"
/**
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 0e4f25d63fd2..5be28e3295f3 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -27,15 +27,17 @@
#include <linux/device.h>
#include <linux/fs.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
-#include <drm/drm_dp_helper.h>
+
#include <drm/drm_crtc.h>
-#include <drm/drmP.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
#include "drm_crtc_helper_internal.h"
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index e7f4fe2848a5..1c9ea9f7fdaf 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -20,13 +20,15 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>
+
#include <drm/drm_dp_dual_mode_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_print.h>
/**
* DOC: dp dual mode helpers
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 54a6414c5d96..e6af758a7d22 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -20,16 +20,18 @@
* OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/sched.h>
#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
+
#include <drm/drm_dp_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "drm_crtc_helper_internal.h"
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index c630ed157994..0984b9a34d55 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -20,19 +20,20 @@
* OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/i2c.h>
-#include <drm/drm_dp_mst_helper.h>
-#include <drm/drmP.h>
-#include <drm/drm_fixed.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
/**
@@ -1995,7 +1996,11 @@ static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
if (ret != 1)
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ if (txmsg->seqno != -1) {
+ WARN_ON((unsigned int)txmsg->seqno >
+ ARRAY_SIZE(txmsg->dst->tx_slots));
+ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ }
}
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 862621494a93..fe0ce86c280f 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -35,16 +35,19 @@
#include <linux/srcu.h>
#include <drm/drm_client.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
-#include "drm_legacy.h"
#include "drm_internal.h"
+#include "drm_legacy.h"
/*
* drm_debug: Enable debug output.
- * Bitmask of DRM_UT_x. See include/drm/drmP.h for details.
+ * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
*/
unsigned int drm_debug = 0;
EXPORT_SYMBOL(drm_debug);
@@ -1161,11 +1164,6 @@ static int __init drm_core_init(void)
}
drm_debugfs_root = debugfs_create_dir("dri", NULL);
- if (!drm_debugfs_root) {
- ret = -ENOMEM;
- DRM_ERROR("Cannot create debugfs-root: %d\n", ret);
- goto error;
- }
ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
if (ret < 0)
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 81dfdd33753a..d18a740fe0f1 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -23,8 +23,10 @@
* OF THIS SOFTWARE.
*/
-#include <drm/drmP.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
+#include <drm/drm_mode.h>
#include "drm_crtc_internal.h"
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e804ac5dec02..9d8f2b952004 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -27,16 +27,19 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
+
#include <linux/hdmi.h>
#include <linux/i2c.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_displayid.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_displayid.h>
+#include <drm/drm_print.h>
#include <drm/drm_scdc_helper.h>
#include "drm_crtc_internal.h"
@@ -2888,6 +2891,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
+#define HDR_STATIC_METADATA_BLOCK 0x06
#define USE_EXTENDED_TAG 0x07
#define EXT_VIDEO_CAPABILITY_BLOCK 0x00
#define EXT_VIDEO_DATA_BLOCK_420 0x0E
@@ -3870,6 +3874,55 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
mode->clock = clock;
}
+static bool cea_db_is_hdmi_hdr_metadata_block(const u8 *db)
+{
+ if (cea_db_tag(db) != USE_EXTENDED_TAG)
+ return false;
+
+ if (db[1] != HDR_STATIC_METADATA_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) < 3)
+ return false;
+
+ return true;
+}
+
+static uint8_t eotf_supported(const u8 *edid_ext)
+{
+ return edid_ext[2] &
+ (BIT(HDMI_EOTF_TRADITIONAL_GAMMA_SDR) |
+ BIT(HDMI_EOTF_TRADITIONAL_GAMMA_HDR) |
+ BIT(HDMI_EOTF_SMPTE_ST2084) |
+ BIT(HDMI_EOTF_BT_2100_HLG));
+}
+
+static uint8_t hdr_metadata_type(const u8 *edid_ext)
+{
+ return edid_ext[3] &
+ BIT(HDMI_STATIC_METADATA_TYPE1);
+}
+
+static void
+drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db)
+{
+ u16 len;
+
+ len = cea_db_payload_len(db);
+
+ connector->hdr_sink_metadata.hdmi_type1.eotf =
+ eotf_supported(db);
+ connector->hdr_sink_metadata.hdmi_type1.metadata_type =
+ hdr_metadata_type(db);
+
+ if (len >= 4)
+ connector->hdr_sink_metadata.hdmi_type1.max_cll = db[4];
+ if (len >= 5)
+ connector->hdr_sink_metadata.hdmi_type1.max_fall = db[5];
+ if (len >= 6)
+ connector->hdr_sink_metadata.hdmi_type1.min_cll = db[6];
+}
+
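To make the byte layout concrete, here is a worked example (not part of this patch) of a data block this parser accepts; the light-level code values are illustrative, and the layout follows CTA-861-G.

/*
 * Illustrative worked example: a 5-byte HDR static metadata data
 * block as seen by drm_parse_hdr_metadata_block().
 *
 *   db[0] = 0xe5  -> tag 7 (USE_EXTENDED_TAG), payload length 5
 *   db[1] = 0x06  -> HDR_STATIC_METADATA_BLOCK
 *   db[2] = 0x05  -> EOTFs: traditional SDR gamma + SMPTE ST 2084 (PQ)
 *   db[3] = 0x01  -> Static Metadata Type 1 supported
 *   db[4] = 0x4e  -> desired max content light level (code value)
 *   db[5] = 0x60  -> desired max frame-average light level (code value)
 *
 * With cea_db_payload_len() == 5, min_cll (db[6]) is absent and that
 * field stays zero.
 */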
static void
drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
{
@@ -4497,6 +4550,8 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
drm_parse_y420cmdb_bitmap(connector, db);
if (cea_db_is_vcdb(db))
drm_parse_vcdb(connector, db);
+ if (cea_db_is_hdmi_hdr_metadata_block(db))
+ drm_parse_hdr_metadata_block(connector, db);
}
}
@@ -4553,8 +4608,8 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
* tells us to assume 8 bpc color depth if the EDID doesn't have
* extensions which tell otherwise.
*/
- if ((info->bpc == 0) && (edid->revision < 4) &&
- (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) {
+ if (info->bpc == 0 && edid->revision == 3 &&
+ edid->input & DRM_EDID_DIGITAL_DFP_1_X) {
info->bpc = 8;
DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n",
connector->name, info->bpc);
@@ -4889,6 +4944,78 @@ static bool is_hdmi2_sink(struct drm_connector *connector)
connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
}
+static inline bool is_eotf_supported(u8 output_eotf, u8 sink_eotf)
+{
+ return sink_eotf & BIT(output_eotf);
+}
+
+/**
+ * drm_hdmi_infoframe_set_hdr_metadata() - fill an HDMI DRM infoframe with
+ * HDR metadata from userspace
+ * @frame: HDMI DRM infoframe
+ * @conn_state: Connector state containing HDR metadata
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int
+drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_connector *connector;
+ struct hdr_output_metadata *hdr_metadata;
+ int err;
+
+ if (!frame || !conn_state)
+ return -EINVAL;
+
+ connector = conn_state->connector;
+
+ if (!conn_state->hdr_output_metadata)
+ return -EINVAL;
+
+ hdr_metadata = conn_state->hdr_output_metadata->data;
+
+ if (!hdr_metadata || !connector)
+ return -EINVAL;
+
+ /* The sink EOTF is a bitmap while the infoframe carries absolute values */
+ if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
+ connector->hdr_sink_metadata.hdmi_type1.eotf)) {
+ DRM_DEBUG_KMS("EOTF Not Supported\n");
+ return -EINVAL;
+ }
+
+ err = hdmi_drm_infoframe_init(frame);
+ if (err < 0)
+ return err;
+
+ frame->eotf = hdr_metadata->hdmi_metadata_type1.eotf;
+ frame->metadata_type = hdr_metadata->hdmi_metadata_type1.metadata_type;
+
+ BUILD_BUG_ON(sizeof(frame->display_primaries) !=
+ sizeof(hdr_metadata->hdmi_metadata_type1.display_primaries));
+ BUILD_BUG_ON(sizeof(frame->white_point) !=
+ sizeof(hdr_metadata->hdmi_metadata_type1.white_point));
+
+ memcpy(&frame->display_primaries,
+ &hdr_metadata->hdmi_metadata_type1.display_primaries,
+ sizeof(frame->display_primaries));
+
+ memcpy(&frame->white_point,
+ &hdr_metadata->hdmi_metadata_type1.white_point,
+ sizeof(frame->white_point));
+
+ frame->max_display_mastering_luminance =
+ hdr_metadata->hdmi_metadata_type1.max_display_mastering_luminance;
+ frame->min_display_mastering_luminance =
+ hdr_metadata->hdmi_metadata_type1.min_display_mastering_luminance;
+ frame->max_fall = hdr_metadata->hdmi_metadata_type1.max_fall;
+ frame->max_cll = hdr_metadata->hdmi_metadata_type1.max_cll;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata);
+
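A hedged driver-side sketch of consuming this helper, e.g. from an encoder's enable path; the function name is illustrative, and the HDMI_INFOFRAME_SIZE(DRM) buffer sizing assumes the DRM infoframe support in <linux/hdmi.h> added alongside this series.

/*
 * Illustrative sketch (not part of this patch): build and pack the
 * DRM infoframe from the connector state, then hand it to the
 * hardware-specific infoframe writer.
 */
static void example_write_hdr_infoframe(const struct drm_connector_state *conn_state)
{
	struct hdmi_drm_infoframe frame;
	u8 buf[HDMI_INFOFRAME_SIZE(DRM)];
	ssize_t len;

	if (drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state))
		return;	/* no HDR metadata, or EOTF unsupported by the sink */

	len = hdmi_drm_infoframe_pack(&frame, buf, sizeof(buf));
	if (len < 0)
		return;

	/* write "buf"/"len" to the encoder's infoframe registers here */
}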
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 1e5593575d23..d38b3b255926 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -7,12 +7,15 @@
*/
-#include <linux/module.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
static char edid_firmware[PATH_MAX];
module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
@@ -278,6 +281,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
* the last one found one as a fallback.
*/
fwstr = kstrdup(edid_firmware, GFP_KERNEL);
+ if (!fwstr)
+ return ERR_PTR(-ENOMEM);
edidstr = fwstr;
while ((edidname = strsep(&edidstr, ","))) {
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index b694fb57eaa4..7fb47b7b8b44 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -21,7 +21,9 @@
*/
#include <linux/export.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include "drm_crtc_internal.h"
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 498f95c3e81d..42852cae749b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -32,18 +32,21 @@
#include <linux/console.h>
#include <linux/dma-buf.h>
#include <linux/kernel.h>
-#include <linux/sysrq.h>
-#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
-#include "drm_crtc_internal.h"
-#include "drm_crtc_helper_internal.h"
+#include "drm_internal.h"
static bool drm_fbdev_emulation = true;
module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
@@ -92,12 +95,6 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
* down by calling drm_fb_helper_fbdev_teardown().
*
- * Drivers that need to handle connector hotplugging (e.g. dp mst) can't use
- * the setup helper and will need to do the whole four-step setup process with
- * drm_fb_helper_prepare(), drm_fb_helper_init(),
- * drm_fb_helper_single_add_all_connectors(), enable hotplugging and
- * drm_fb_helper_initial_config() to avoid a possible race window.
- *
* At runtime drivers should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
* They should also notify the fb helper code from updates to the output
@@ -120,8 +117,7 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* encoders and connectors. To finish up the fbdev helper initialization, the
* drm_fb_helper_init() function is called. To probe for all attached displays
* and set up an initial configuration using the detected hardware, drivers
- * should call drm_fb_helper_single_add_all_connectors() followed by
- * drm_fb_helper_initial_config().
+ * should call drm_fb_helper_initial_config().
*
* If &drm_framebuffer_funcs.dirty is set, the
* drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit} functions will
@@ -134,165 +130,6 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* deferred I/O (coupled with drm_fb_helper_fbdev_teardown()).
*/
-#define drm_fb_helper_for_each_connector(fbh, i__) \
- for (({ lockdep_assert_held(&(fbh)->lock); }), \
- i__ = 0; i__ < (fbh)->connector_count; i__++)
-
-static int __drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- struct drm_fb_helper_connector *fb_conn;
- struct drm_fb_helper_connector **temp;
- unsigned int count;
-
- if (!drm_fbdev_emulation)
- return 0;
-
- lockdep_assert_held(&fb_helper->lock);
-
- count = fb_helper->connector_count + 1;
-
- if (count > fb_helper->connector_info_alloc_count) {
- size_t size = count * sizeof(fb_conn);
-
- temp = krealloc(fb_helper->connector_info, size, GFP_KERNEL);
- if (!temp)
- return -ENOMEM;
-
- fb_helper->connector_info_alloc_count = count;
- fb_helper->connector_info = temp;
- }
-
- fb_conn = kzalloc(sizeof(*fb_conn), GFP_KERNEL);
- if (!fb_conn)
- return -ENOMEM;
-
- drm_connector_get(connector);
- fb_conn->connector = connector;
- fb_helper->connector_info[fb_helper->connector_count++] = fb_conn;
-
- return 0;
-}
-
-int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- int err;
-
- if (!fb_helper)
- return 0;
-
- mutex_lock(&fb_helper->lock);
- err = __drm_fb_helper_add_one_connector(fb_helper, connector);
- mutex_unlock(&fb_helper->lock);
-
- return err;
-}
-EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
-
-/**
- * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
- * emulation helper
- * @fb_helper: fbdev initialized with drm_fb_helper_init, can be NULL
- *
- * This functions adds all the available connectors for use with the given
- * fb_helper. This is a separate step to allow drivers to freely assign
- * connectors to the fbdev, e.g. if some are reserved for special purposes or
- * not adequate to be used for the fbcon.
- *
- * This function is protected against concurrent connector hotadds/removals
- * using drm_fb_helper_add_one_connector() and
- * drm_fb_helper_remove_one_connector().
- */
-int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- int i, ret = 0;
-
- if (!drm_fbdev_emulation || !fb_helper)
- return 0;
-
- dev = fb_helper->dev;
-
- mutex_lock(&fb_helper->lock);
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
- continue;
-
- ret = __drm_fb_helper_add_one_connector(fb_helper, connector);
- if (ret)
- goto fail;
- }
- goto out;
-
-fail:
- drm_fb_helper_for_each_connector(fb_helper, i) {
- struct drm_fb_helper_connector *fb_helper_connector =
- fb_helper->connector_info[i];
-
- drm_connector_put(fb_helper_connector->connector);
-
- kfree(fb_helper_connector);
- fb_helper->connector_info[i] = NULL;
- }
- fb_helper->connector_count = 0;
-out:
- drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&fb_helper->lock);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
-
-static int __drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- struct drm_fb_helper_connector *fb_helper_connector;
- int i, j;
-
- if (!drm_fbdev_emulation)
- return 0;
-
- lockdep_assert_held(&fb_helper->lock);
-
- drm_fb_helper_for_each_connector(fb_helper, i) {
- if (fb_helper->connector_info[i]->connector == connector)
- break;
- }
-
- if (i == fb_helper->connector_count)
- return -EINVAL;
- fb_helper_connector = fb_helper->connector_info[i];
- drm_connector_put(fb_helper_connector->connector);
-
- for (j = i + 1; j < fb_helper->connector_count; j++)
- fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
-
- fb_helper->connector_count--;
- kfree(fb_helper_connector);
-
- return 0;
-}
-
-int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- int err;
-
- if (!fb_helper)
- return 0;
-
- mutex_lock(&fb_helper->lock);
- err = __drm_fb_helper_remove_one_connector(fb_helper, connector);
- mutex_unlock(&fb_helper->lock);
-
- return err;
-}
-EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
-
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
{
uint16_t *r_base, *g_base, *b_base;
@@ -316,13 +153,11 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
const struct drm_crtc_helper_funcs *funcs;
- int i;
+ struct drm_mode_set *mode_set;
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
- for (i = 0; i < helper->crtc_count; i++) {
- struct drm_mode_set *mode_set =
- &helper->crtc_info[i].mode_set;
-
+ mutex_lock(&helper->client.modeset_mutex);
+ drm_client_for_each_modeset(mode_set, &helper->client) {
if (!mode_set->crtc->enabled)
continue;
@@ -339,6 +174,7 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
mode_set->y,
ENTER_ATOMIC_MODE_SET);
}
+ mutex_unlock(&helper->client.modeset_mutex);
}
return 0;
@@ -352,14 +188,14 @@ EXPORT_SYMBOL(drm_fb_helper_debug_enter);
int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
+ struct drm_client_dev *client = &helper->client;
struct drm_crtc *crtc;
const struct drm_crtc_helper_funcs *funcs;
+ struct drm_mode_set *mode_set;
struct drm_framebuffer *fb;
- int i;
-
- for (i = 0; i < helper->crtc_count; i++) {
- struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+ mutex_lock(&client->modeset_mutex);
+ drm_client_for_each_modeset(mode_set, client) {
crtc = mode_set->crtc;
if (drm_drv_uses_atomic_modeset(crtc->dev))
continue;
@@ -382,143 +218,12 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
crtc->y, LEAVE_ATOMIC_MODE_SET);
}
+ mutex_unlock(&client->modeset_mutex);
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
-static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool active)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_plane_state *plane_state;
- struct drm_plane *plane;
- struct drm_atomic_state *state;
- int i, ret;
- struct drm_modeset_acquire_ctx ctx;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- state = drm_atomic_state_alloc(dev);
- if (!state) {
- ret = -ENOMEM;
- goto out_ctx;
- }
-
- state->acquire_ctx = &ctx;
-retry:
- drm_for_each_plane(plane, dev) {
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state)) {
- ret = PTR_ERR(plane_state);
- goto out_state;
- }
-
- plane_state->rotation = DRM_MODE_ROTATE_0;
-
- /* disable non-primary: */
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- continue;
-
- ret = __drm_atomic_helper_disable_plane(plane, plane_state);
- if (ret != 0)
- goto out_state;
- }
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
- struct drm_plane *primary = mode_set->crtc->primary;
-
- /* Cannot fail as we've already gotten the plane state above */
- plane_state = drm_atomic_get_new_plane_state(state, primary);
- plane_state->rotation = fb_helper->crtc_info[i].rotation;
-
- ret = __drm_atomic_helper_set_config(mode_set, state);
- if (ret != 0)
- goto out_state;
-
- /*
- * __drm_atomic_helper_set_config() sets active when a
- * mode is set, unconditionally clear it if we force DPMS off
- */
- if (!active) {
- struct drm_crtc *crtc = mode_set->crtc;
- struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
-
- crtc_state->active = false;
- }
- }
-
- ret = drm_atomic_commit(state);
-
-out_state:
- if (ret == -EDEADLK)
- goto backoff;
-
- drm_atomic_state_put(state);
-out_ctx:
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
-
- goto retry;
-}
-
-static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_plane *plane;
- int i, ret = 0;
-
- drm_modeset_lock_all(fb_helper->dev);
- drm_for_each_plane(plane, dev) {
- if (plane->type != DRM_PLANE_TYPE_PRIMARY)
- drm_plane_force_disable(plane);
-
- if (plane->rotation_property)
- drm_mode_plane_set_obj_prop(plane,
- plane->rotation_property,
- DRM_MODE_ROTATE_0);
- }
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
- struct drm_crtc *crtc = mode_set->crtc;
-
- if (crtc->funcs->cursor_set2) {
- ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
- if (ret)
- goto out;
- } else if (crtc->funcs->cursor_set) {
- ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
- if (ret)
- goto out;
- }
-
- ret = drm_mode_set_config_internal(mode_set);
- if (ret)
- goto out;
- }
-out:
- drm_modeset_unlock_all(fb_helper->dev);
-
- return ret;
-}
-
-static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev = fb_helper->dev;
-
- if (drm_drv_uses_atomic_modeset(dev))
- return restore_fbdev_mode_atomic(fb_helper, true);
- else
- return restore_fbdev_mode_legacy(fb_helper);
-}
-
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: driver-allocated fbdev helper, can be NULL
@@ -542,7 +247,17 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return 0;
mutex_lock(&fb_helper->lock);
- ret = restore_fbdev_mode(fb_helper);
+ /*
+ * TODO:
+ * We should bail out here if there is a master by dropping _force.
+ * Currently these igt tests fail if we do that:
+ * - kms_fbcon_fbt@psr
+ * - kms_fbcon_fbt@psr-suspend
+ *
+ * So first these tests need to be fixed so they drop master or don't
+ * have an fd open.
+ */
+ ret = drm_client_modeset_commit_force(&fb_helper->client);
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
@@ -556,34 +271,6 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
-static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- int bound = 0, crtcs_bound = 0;
-
- /*
- * Sometimes user space wants everything disabled, so don't steal the
- * display if there's a master.
- */
- if (READ_ONCE(dev->master))
- return false;
-
- drm_for_each_crtc(crtc, dev) {
- drm_modeset_lock(&crtc->mutex, NULL);
- if (crtc->primary->fb)
- crtcs_bound++;
- if (crtc->primary->fb == fb_helper->fb)
- bound++;
- drm_modeset_unlock(&crtc->mutex);
- }
-
- if (bound < crtcs_bound)
- return false;
-
- return true;
-}
-
#ifdef CONFIG_MAGIC_SYSRQ
/*
* restore fbcon display for all kms driver's using this helper, used for sysrq
@@ -604,7 +291,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
continue;
mutex_lock(&helper->lock);
- ret = restore_fbdev_mode(helper);
+ ret = drm_client_modeset_commit_force(&helper->client);
if (ret)
error = true;
mutex_unlock(&helper->lock);
@@ -636,47 +323,12 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
-static void dpms_legacy(struct drm_fb_helper *fb_helper, int dpms_mode)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_connector *connector;
- struct drm_mode_set *modeset;
- int i, j;
-
- drm_modeset_lock_all(dev);
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
-
- if (!modeset->crtc->enabled)
- continue;
-
- for (j = 0; j < modeset->num_connectors; j++) {
- connector = modeset->connectors[j];
- connector->funcs->dpms(connector, dpms_mode);
- drm_object_property_set_value(&connector->base,
- dev->mode_config.dpms_property, dpms_mode);
- }
- }
- drm_modeset_unlock_all(dev);
-}
-
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
- /*
- * For each CRTC in this fb, turn the connectors on/off.
- */
mutex_lock(&fb_helper->lock);
- if (!drm_fb_helper_is_bound(fb_helper)) {
- mutex_unlock(&fb_helper->lock);
- return;
- }
-
- if (drm_drv_uses_atomic_modeset(fb_helper->dev))
- restore_fbdev_mode_atomic(fb_helper, dpms_mode == DRM_MODE_DPMS_ON);
- else
- dpms_legacy(fb_helper, dpms_mode);
+ drm_client_modeset_dpms(&fb_helper->client, dpms_mode);
mutex_unlock(&fb_helper->lock);
}
@@ -716,43 +368,6 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_blank);
-static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper,
- struct drm_mode_set *modeset)
-{
- int i;
-
- for (i = 0; i < modeset->num_connectors; i++) {
- drm_connector_put(modeset->connectors[i]);
- modeset->connectors[i] = NULL;
- }
- modeset->num_connectors = 0;
-
- drm_mode_destroy(helper->dev, modeset->mode);
- modeset->mode = NULL;
-
- /* FIXME should hold a ref? */
- modeset->fb = NULL;
-}
-
-static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
-{
- int i;
-
- for (i = 0; i < helper->connector_count; i++) {
- drm_connector_put(helper->connector_info[i]->connector);
- kfree(helper->connector_info[i]);
- }
- kfree(helper->connector_info);
-
- for (i = 0; i < helper->crtc_count; i++) {
- struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
-
- drm_fb_helper_modeset_release(helper, modeset);
- kfree(modeset->connectors);
- }
- kfree(helper->crtc_info);
-}
-
static void drm_fb_helper_resume_worker(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
@@ -767,7 +382,7 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip)
{
struct drm_framebuffer *fb = fb_helper->fb;
- unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ unsigned int cpp = fb->format->cpp[0];
size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp;
void *src = fb_helper->fbdev->screen_buffer + offset;
void *dst = fb_helper->buffer->vaddr + offset;
@@ -831,7 +446,7 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
* drm_fb_helper_init - initialize a &struct drm_fb_helper
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
- * @max_conn_count: max connector count
+ * @max_conn_count: max connector count (not used)
*
* This allocates the structures for the fbdev helper with the given limits.
* Note that this won't yet touch the hardware (through the driver interfaces)
@@ -847,55 +462,26 @@ int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
int max_conn_count)
{
- struct drm_crtc *crtc;
- struct drm_mode_config *config = &dev->mode_config;
- int i;
+ int ret;
if (!drm_fbdev_emulation) {
dev->fb_helper = fb_helper;
return 0;
}
- if (!max_conn_count)
- return -EINVAL;
-
- fb_helper->crtc_info = kcalloc(config->num_crtc, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
- if (!fb_helper->crtc_info)
- return -ENOMEM;
-
- fb_helper->crtc_count = config->num_crtc;
- fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
- if (!fb_helper->connector_info) {
- kfree(fb_helper->crtc_info);
- return -ENOMEM;
- }
- fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
- fb_helper->connector_count = 0;
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- fb_helper->crtc_info[i].mode_set.connectors =
- kcalloc(max_conn_count,
- sizeof(struct drm_connector *),
- GFP_KERNEL);
-
- if (!fb_helper->crtc_info[i].mode_set.connectors)
- goto out_free;
- fb_helper->crtc_info[i].mode_set.num_connectors = 0;
- fb_helper->crtc_info[i].rotation = DRM_MODE_ROTATE_0;
- }
-
- i = 0;
- drm_for_each_crtc(crtc, dev) {
- fb_helper->crtc_info[i].mode_set.crtc = crtc;
- i++;
+ /*
+ * If this is not the generic fbdev client, initialize a drm_client
+ * without callbacks so we can use the modesets.
+ */
+ if (!fb_helper->client.funcs) {
+ ret = drm_client_init(dev, &fb_helper->client, "drm_fb_helper", NULL);
+ if (ret)
+ return ret;
}
dev->fb_helper = fb_helper;
return 0;
-out_free:
- drm_fb_helper_crtc_free(fb_helper);
- return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_init);
@@ -999,8 +585,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
mutex_unlock(&kernel_fb_helper_lock);
mutex_destroy(&fb_helper->lock);
- drm_fb_helper_crtc_free(fb_helper);
+ if (!fb_helper->client.funcs)
+ drm_client_release(&fb_helper->client);
}
EXPORT_SYMBOL(drm_fb_helper_fini);
@@ -1345,13 +932,14 @@ static int setcmap_pseudo_palette(struct fb_cmap *cmap, struct fb_info *info)
static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
+ struct drm_mode_set *modeset;
struct drm_crtc *crtc;
u16 *r, *g, *b;
- int i, ret = 0;
+ int ret = 0;
drm_modeset_lock_all(fb_helper->dev);
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ drm_client_for_each_modeset(modeset, &fb_helper->client) {
+ crtc = modeset->crtc;
if (!crtc->funcs->gamma_set || !crtc->gamma_size)
return -EINVAL;
@@ -1427,10 +1015,11 @@ static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info)
struct drm_modeset_acquire_ctx ctx;
struct drm_crtc_state *crtc_state;
struct drm_atomic_state *state;
+ struct drm_mode_set *modeset;
struct drm_crtc *crtc;
u16 *r, *g, *b;
- int i, ret = 0;
bool replaced;
+ int ret = 0;
drm_modeset_acquire_init(&ctx, 0);
@@ -1442,8 +1031,8 @@ static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info)
state->acquire_ctx = &ctx;
retry:
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ drm_client_for_each_modeset(modeset, &fb_helper->client) {
+ crtc = modeset->crtc;
if (!gamma_lut)
gamma_lut = setcmap_new_gamma_lut(crtc, cmap);
@@ -1471,8 +1060,8 @@ retry:
if (ret)
goto out_state;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ drm_client_for_each_modeset(modeset, &fb_helper->client) {
+ crtc = modeset->crtc;
r = crtc->gamma_store;
g = r + crtc->gamma_size;
@@ -1509,6 +1098,7 @@ backoff:
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
int ret;
if (oops_in_progress)
@@ -1516,19 +1106,22 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
mutex_lock(&fb_helper->lock);
- if (!drm_fb_helper_is_bound(fb_helper)) {
+ if (!drm_master_internal_acquire(dev)) {
ret = -EBUSY;
- goto out;
+ goto unlock;
}
+ mutex_lock(&fb_helper->client.modeset_mutex);
if (info->fix.visual == FB_VISUAL_TRUECOLOR)
ret = setcmap_pseudo_palette(cmap, info);
else if (drm_drv_uses_atomic_modeset(fb_helper->dev))
ret = setcmap_atomic(cmap, info);
else
ret = setcmap_legacy(cmap, info);
+ mutex_unlock(&fb_helper->client.modeset_mutex);
-out:
+ drm_master_internal_release(dev);
+unlock:
mutex_unlock(&fb_helper->lock);
return ret;
@@ -1548,12 +1141,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_mode_set *mode_set;
+ struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
int ret = 0;
mutex_lock(&fb_helper->lock);
- if (!drm_fb_helper_is_bound(fb_helper)) {
+ if (!drm_master_internal_acquire(dev)) {
ret = -EBUSY;
goto unlock;
}
@@ -1576,8 +1169,7 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
* make. If we're not smart enough here, one should
* just consider switch the userspace to KMS.
*/
- mode_set = &fb_helper->crtc_info[0].mode_set;
- crtc = mode_set->crtc;
+ crtc = fb_helper->client.modesets[0].crtc;
/*
* Only wait for a vblank event if the CRTC is
@@ -1591,11 +1183,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
}
ret = 0;
- goto unlock;
+ break;
default:
ret = -ENOTTY;
}
+ drm_master_internal_release(dev);
unlock:
mutex_unlock(&fb_helper->lock);
return ret;
@@ -1773,16 +1366,14 @@ EXPORT_SYMBOL(drm_fb_helper_set_par);
static void pan_set(struct drm_fb_helper *fb_helper, int x, int y)
{
- int i;
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- struct drm_mode_set *mode_set;
-
- mode_set = &fb_helper->crtc_info[i].mode_set;
+ struct drm_mode_set *mode_set;
+ mutex_lock(&fb_helper->client.modeset_mutex);
+ drm_client_for_each_modeset(mode_set, &fb_helper->client) {
mode_set->x = x;
mode_set->y = y;
}
+ mutex_unlock(&fb_helper->client.modeset_mutex);
}
static int pan_display_atomic(struct fb_var_screeninfo *var,
@@ -1793,7 +1384,7 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
pan_set(fb_helper, var->xoffset, var->yoffset);
- ret = restore_fbdev_mode_atomic(fb_helper, true);
+ ret = drm_client_modeset_commit_force(&fb_helper->client);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
@@ -1807,14 +1398,13 @@ static int pan_display_legacy(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
+ struct drm_client_dev *client = &fb_helper->client;
struct drm_mode_set *modeset;
int ret = 0;
- int i;
+ mutex_lock(&client->modeset_mutex);
drm_modeset_lock_all(fb_helper->dev);
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
-
+ drm_client_for_each_modeset(modeset, client) {
modeset->x = var->xoffset;
modeset->y = var->yoffset;
@@ -1827,6 +1417,7 @@ static int pan_display_legacy(struct fb_var_screeninfo *var,
}
}
drm_modeset_unlock_all(fb_helper->dev);
+ mutex_unlock(&client->modeset_mutex);
return ret;
}
@@ -1847,15 +1438,18 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
return -EBUSY;
mutex_lock(&fb_helper->lock);
- if (!drm_fb_helper_is_bound(fb_helper)) {
- mutex_unlock(&fb_helper->lock);
- return -EBUSY;
+ if (!drm_master_internal_acquire(dev)) {
+ ret = -EBUSY;
+ goto unlock;
}
if (drm_drv_uses_atomic_modeset(dev))
ret = pan_display_atomic(var, info);
else
ret = pan_display_legacy(var, info);
+
+ drm_master_internal_release(dev);
+unlock:
mutex_unlock(&fb_helper->lock);
return ret;
@@ -1869,10 +1463,13 @@ EXPORT_SYMBOL(drm_fb_helper_pan_display);
static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int preferred_bpp)
{
+ struct drm_client_dev *client = &fb_helper->client;
int ret = 0;
int crtc_count = 0;
- int i;
+ struct drm_connector_list_iter conn_iter;
struct drm_fb_helper_surface_size sizes;
+ struct drm_connector *connector;
+ struct drm_mode_set *mode_set;
int best_depth = 0;
memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
@@ -1888,11 +1485,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- drm_fb_helper_for_each_connector(fb_helper, i) {
- struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
+ drm_connector_list_iter_begin(fb_helper->dev, &conn_iter);
+ drm_client_for_each_connector_iter(connector, &conn_iter) {
struct drm_cmdline_mode *cmdline_mode;
- cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
+ cmdline_mode = &connector->cmdline_mode;
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
@@ -1917,19 +1514,20 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
break;
}
}
+ drm_connector_list_iter_end(&conn_iter);
/*
* If we run into a situation where, for example, the primary plane
* supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth
* 16) we need to scale down the depth of the sizes we request.
*/
- for (i = 0; i < fb_helper->crtc_count; i++) {
- struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+ mutex_lock(&client->modeset_mutex);
+ drm_client_for_each_modeset(mode_set, client) {
struct drm_crtc *crtc = mode_set->crtc;
struct drm_plane *plane = crtc->primary;
int j;
- DRM_DEBUG("test CRTC %d primary plane\n", i);
+ DRM_DEBUG("test CRTC %u primary plane\n", drm_crtc_index(crtc));
for (j = 0; j < plane->format_count; j++) {
const struct drm_format_info *fmt;
@@ -1969,9 +1567,8 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* first up get a count of crtcs now in use and new min/maxes width/heights */
crtc_count = 0;
- for (i = 0; i < fb_helper->crtc_count; i++) {
+ drm_client_for_each_modeset(mode_set, client) {
struct drm_display_mode *desired_mode;
- struct drm_mode_set *mode_set;
int x, y, j;
/* in case of tile group, are we the last tile vert or horiz?
* If no tile group you are always the last one both vertically
@@ -1979,16 +1576,15 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
*/
bool lastv = true, lasth = true;
- desired_mode = fb_helper->crtc_info[i].desired_mode;
- mode_set = &fb_helper->crtc_info[i].mode_set;
+ desired_mode = mode_set->mode;
if (!desired_mode)
continue;
crtc_count++;
- x = fb_helper->crtc_info[i].x;
- y = fb_helper->crtc_info[i].y;
+ x = mode_set->x;
+ y = mode_set->y;
sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
@@ -2009,13 +1605,14 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (lastv)
sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height);
}
+ mutex_unlock(&client->modeset_mutex);
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
DRM_INFO("Cannot find any crtc or sizes\n");
/* First time: disable all crtc's.. */
- if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
- restore_fbdev_mode(fb_helper);
+ if (!fb_helper->deferred_setup)
+ drm_client_modeset_commit(client);
return -EAGAIN;
}
@@ -2102,742 +1699,6 @@ void drm_fb_helper_fill_info(struct fb_info *info,
}
EXPORT_SYMBOL(drm_fb_helper_fill_info);
-static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
- uint32_t maxX,
- uint32_t maxY)
-{
- struct drm_connector *connector;
- int i, count = 0;
-
- drm_fb_helper_for_each_connector(fb_helper, i) {
- connector = fb_helper->connector_info[i]->connector;
- count += connector->funcs->fill_modes(connector, maxX, maxY);
- }
-
- return count;
-}
-
-struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, &fb_connector->connector->modes, head) {
- if (mode->hdisplay > width ||
- mode->vdisplay > height)
- continue;
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- return mode;
- }
- return NULL;
-}
-EXPORT_SYMBOL(drm_has_preferred_mode);
-
-static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
-{
- return fb_connector->connector->cmdline_mode.specified;
-}
-
-struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn)
-{
- struct drm_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode;
- bool prefer_non_interlace;
-
- cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
- if (cmdline_mode->specified == false)
- return NULL;
-
- /* attempt to find a matching mode in the list of modes
- * we have gotten so far, if not add a CVT mode that conforms
- */
- if (cmdline_mode->rb || cmdline_mode->margins)
- goto create_mode;
-
- prefer_non_interlace = !cmdline_mode->interlace;
-again:
- list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
- /* check width/height */
- if (mode->hdisplay != cmdline_mode->xres ||
- mode->vdisplay != cmdline_mode->yres)
- continue;
-
- if (cmdline_mode->refresh_specified) {
- if (mode->vrefresh != cmdline_mode->refresh)
- continue;
- }
-
- if (cmdline_mode->interlace) {
- if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
- } else if (prefer_non_interlace) {
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- continue;
- }
- return mode;
- }
-
- if (prefer_non_interlace) {
- prefer_non_interlace = false;
- goto again;
- }
-
-create_mode:
- mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
- cmdline_mode);
- list_add(&mode->head, &fb_helper_conn->connector->modes);
- return mode;
-}
-EXPORT_SYMBOL(drm_pick_cmdline_mode);
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
- bool enable;
-
- if (connector->display_info.non_desktop)
- return false;
-
- if (strict)
- enable = connector->status == connector_status_connected;
- else
- enable = connector->status != connector_status_disconnected;
-
- return enable;
-}
-
-static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
- bool *enabled)
-{
- bool any_enabled = false;
- struct drm_connector *connector;
- int i = 0;
-
- drm_fb_helper_for_each_connector(fb_helper, i) {
- connector = fb_helper->connector_info[i]->connector;
- enabled[i] = drm_connector_enabled(connector, true);
- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no");
-
- any_enabled |= enabled[i];
- }
-
- if (any_enabled)
- return;
-
- drm_fb_helper_for_each_connector(fb_helper, i) {
- connector = fb_helper->connector_info[i]->connector;
- enabled[i] = drm_connector_enabled(connector, false);
- }
-}
-
-static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
- struct drm_display_mode **modes,
- struct drm_fb_offset *offsets,
- bool *enabled, int width, int height)
-{
- int count, i, j;
- bool can_clone = false;
- struct drm_fb_helper_connector *fb_helper_conn;
- struct drm_display_mode *dmt_mode, *mode;
-
- /* only contemplate cloning in the single crtc case */
- if (fb_helper->crtc_count > 1)
- return false;
-
- count = 0;
- drm_fb_helper_for_each_connector(fb_helper, i) {
- if (enabled[i])
- count++;
- }
-
- /* only contemplate cloning if more than one connector is enabled */
- if (count <= 1)
- return false;
-
- /* check the command line or if nothing common pick 1024x768 */
- can_clone = true;
- drm_fb_helper_for_each_connector(fb_helper, i) {
- if (!enabled[i])
- continue;
- fb_helper_conn = fb_helper->connector_info[i];
- modes[i] = drm_pick_cmdline_mode(fb_helper_conn);
- if (!modes[i]) {
- can_clone = false;
- break;
- }
- for (j = 0; j < i; j++) {
- if (!enabled[j])
- continue;
- if (!drm_mode_match(modes[j], modes[i],
- DRM_MODE_MATCH_TIMINGS |
- DRM_MODE_MATCH_CLOCK |
- DRM_MODE_MATCH_FLAGS |
- DRM_MODE_MATCH_3D_FLAGS))
- can_clone = false;
- }
- }
-
- if (can_clone) {
- DRM_DEBUG_KMS("can clone using command line\n");
- return true;
- }
-
- /* try and find a 1024x768 mode on each connector */
- can_clone = true;
- dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
-
- drm_fb_helper_for_each_connector(fb_helper, i) {
- if (!enabled[i])
- continue;
-
- fb_helper_conn = fb_helper->connector_info[i];
- list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
- if (drm_mode_match(mode, dmt_mode,
- DRM_MODE_MATCH_TIMINGS |
- DRM_MODE_MATCH_CLOCK |
- DRM_MODE_MATCH_FLAGS |
- DRM_MODE_MATCH_3D_FLAGS))
- modes[i] = mode;
- }
- if (!modes[i])
- can_clone = false;
- }
-
- if (can_clone) {
- DRM_DEBUG_KMS("can clone using 1024x768\n");
- return true;
- }
- DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
- return false;
-}
-
-static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
- struct drm_display_mode **modes,
- struct drm_fb_offset *offsets,
- int idx,
- int h_idx, int v_idx)
-{
- struct drm_fb_helper_connector *fb_helper_conn;
- int i;
- int hoffset = 0, voffset = 0;
-
- drm_fb_helper_for_each_connector(fb_helper, i) {
- fb_helper_conn = fb_helper->connector_info[i];
- if (!fb_helper_conn->connector->has_tile)
- continue;
-
- if (!modes[i] && (h_idx || v_idx)) {
- DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
- fb_helper_conn->connector->base.id);
- continue;
- }
- if (fb_helper_conn->connector->tile_h_loc < h_idx)
- hoffset += modes[i]->hdisplay;
-
- if (fb_helper_conn->connector->tile_v_loc < v_idx)
- voffset += modes[i]->vdisplay;
- }
- offsets[idx].x = hoffset;
- offsets[idx].y = voffset;
- DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
- return 0;
-}
-
-static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
- struct drm_display_mode **modes,
- struct drm_fb_offset *offsets,
- bool *enabled, int width, int height)
-{
- struct drm_fb_helper_connector *fb_helper_conn;
- const u64 mask = BIT_ULL(fb_helper->connector_count) - 1;
- u64 conn_configured = 0;
- int tile_pass = 0;
- int i;
-
-retry:
- drm_fb_helper_for_each_connector(fb_helper, i) {
- fb_helper_conn = fb_helper->connector_info[i];
-
- if (conn_configured & BIT_ULL(i))
- continue;
-
- if (enabled[i] == false) {
- conn_configured |= BIT_ULL(i);
- continue;
- }
-
- /* first pass over all the untiled connectors */
- if (tile_pass == 0 && fb_helper_conn->connector->has_tile)
- continue;
-
- if (tile_pass == 1) {
- if (fb_helper_conn->connector->tile_h_loc != 0 ||
- fb_helper_conn->connector->tile_v_loc != 0)
- continue;
-
- } else {
- if (fb_helper_conn->connector->tile_h_loc != tile_pass - 1 &&
- fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
- /* if this tile_pass doesn't cover any of the tiles - keep going */
- continue;
-
- /*
- * find the tile offsets for this pass - need to find
- * all tiles left and above
- */
- drm_get_tile_offsets(fb_helper, modes, offsets,
- i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
- }
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- fb_helper_conn->connector->base.id);
-
- /* got for command line mode first */
- modes[i] = drm_pick_cmdline_mode(fb_helper_conn);
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
- fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
- modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
- }
- /* No preferred modes, pick one off the list */
- if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
- list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
- break;
- }
- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
- conn_configured |= BIT_ULL(i);
- }
-
- if ((conn_configured & mask) != mask) {
- tile_pass++;
- goto retry;
- }
- return true;
-}
-
-static bool connector_has_possible_crtc(struct drm_connector *connector,
- struct drm_crtc *crtc)
-{
- struct drm_encoder *encoder;
- int i;
-
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
- if (encoder->possible_crtcs & drm_crtc_mask(crtc))
- return true;
- }
-
- return false;
-}
-
-static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_crtc **best_crtcs,
- struct drm_display_mode **modes,
- int n, int width, int height)
-{
- int c, o;
- struct drm_connector *connector;
- int my_score, best_score, score;
- struct drm_fb_helper_crtc **crtcs, *crtc;
- struct drm_fb_helper_connector *fb_helper_conn;
-
- if (n == fb_helper->connector_count)
- return 0;
-
- fb_helper_conn = fb_helper->connector_info[n];
- connector = fb_helper_conn->connector;
-
- best_crtcs[n] = NULL;
- best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
- if (modes[n] == NULL)
- return best_score;
-
- crtcs = kcalloc(fb_helper->connector_count,
- sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
- if (!crtcs)
- return best_score;
-
- my_score = 1;
- if (connector->status == connector_status_connected)
- my_score++;
- if (drm_has_cmdline_mode(fb_helper_conn))
- my_score++;
- if (drm_has_preferred_mode(fb_helper_conn, width, height))
- my_score++;
-
- /*
- * select a crtc for this connector and then attempt to configure
- * remaining connectors
- */
- for (c = 0; c < fb_helper->crtc_count; c++) {
- crtc = &fb_helper->crtc_info[c];
-
- if (!connector_has_possible_crtc(connector,
- crtc->mode_set.crtc))
- continue;
-
- for (o = 0; o < n; o++)
- if (best_crtcs[o] == crtc)
- break;
-
- if (o < n) {
- /* ignore cloning unless only a single crtc */
- if (fb_helper->crtc_count > 1)
- continue;
-
- if (!drm_mode_equal(modes[o], modes[n]))
- continue;
- }
-
- crtcs[n] = crtc;
- memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
- score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
- width, height);
- if (score > best_score) {
- best_score = score;
- memcpy(best_crtcs, crtcs,
- fb_helper->connector_count *
- sizeof(struct drm_fb_helper_crtc *));
- }
- }
-
- kfree(crtcs);
- return best_score;
-}
-
-/*
- * This function checks if rotation is necessary because of panel orientation
- * and if it is, if it is supported.
- * If rotation is necessary and supported, it gets set in fb_crtc.rotation.
- * If rotation is necessary but not supported, a DRM_MODE_ROTATE_* flag gets
- * or-ed into fb_helper->sw_rotations. In drm_setup_crtcs_fb() we check if only
- * one bit is set and then we set fb_info.fbcon_rotate_hint to make fbcon do
- * the unsupported rotation.
- */
-static void drm_setup_crtc_rotation(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_crtc *fb_crtc,
- struct drm_connector *connector)
-{
- struct drm_plane *plane = fb_crtc->mode_set.crtc->primary;
- uint64_t valid_mask = 0;
- int i, rotation;
-
- fb_crtc->rotation = DRM_MODE_ROTATE_0;
-
- switch (connector->display_info.panel_orientation) {
- case DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP:
- rotation = DRM_MODE_ROTATE_180;
- break;
- case DRM_MODE_PANEL_ORIENTATION_LEFT_UP:
- rotation = DRM_MODE_ROTATE_90;
- break;
- case DRM_MODE_PANEL_ORIENTATION_RIGHT_UP:
- rotation = DRM_MODE_ROTATE_270;
- break;
- default:
- rotation = DRM_MODE_ROTATE_0;
- }
-
- /*
- * TODO: support 90 / 270 degree hardware rotation,
- * depending on the hardware this may require the framebuffer
- * to be in a specific tiling format.
- */
- if (rotation != DRM_MODE_ROTATE_180 || !plane->rotation_property) {
- fb_helper->sw_rotations |= rotation;
- return;
- }
-
- for (i = 0; i < plane->rotation_property->num_values; i++)
- valid_mask |= (1ULL << plane->rotation_property->values[i]);
-
- if (!(rotation & valid_mask)) {
- fb_helper->sw_rotations |= rotation;
- return;
- }
-
- fb_crtc->rotation = rotation;
- /* Rotating in hardware, fbcon should not rotate */
- fb_helper->sw_rotations |= DRM_MODE_ROTATE_0;
-}
-
-static struct drm_fb_helper_crtc *
-drm_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
-{
- int i;
-
- for (i = 0; i < fb_helper->crtc_count; i++)
- if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
- return &fb_helper->crtc_info[i];
-
- return NULL;
-}
-
-/* Try to read the BIOS display configuration and use it for the initial config */
-static bool drm_fb_helper_firmware_config(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_fb_offset *offsets,
- bool *enabled, int width, int height)
-{
- struct drm_device *dev = fb_helper->dev;
- unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
- unsigned long conn_configured, conn_seq, mask;
- int i, j;
- bool *save_enabled;
- bool fallback = true, ret = true;
- int num_connectors_enabled = 0;
- int num_connectors_detected = 0;
- struct drm_modeset_acquire_ctx ctx;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- return false;
-
- save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
- if (!save_enabled)
- return false;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- while (drm_modeset_lock_all_ctx(dev, &ctx) != 0)
- drm_modeset_backoff(&ctx);
-
- memcpy(save_enabled, enabled, count);
- mask = GENMASK(count - 1, 0);
- conn_configured = 0;
-retry:
- conn_seq = conn_configured;
- for (i = 0; i < count; i++) {
- struct drm_fb_helper_connector *fb_conn;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct drm_fb_helper_crtc *new_crtc;
-
- fb_conn = fb_helper->connector_info[i];
- connector = fb_conn->connector;
-
- if (conn_configured & BIT(i))
- continue;
-
- if (conn_seq == 0 && !connector->has_tile)
- continue;
-
- if (connector->status == connector_status_connected)
- num_connectors_detected++;
-
- if (!enabled[i]) {
- DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
- connector->name);
- conn_configured |= BIT(i);
- continue;
- }
-
- if (connector->force == DRM_FORCE_OFF) {
- DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
- connector->name);
- enabled[i] = false;
- continue;
- }
-
- encoder = connector->state->best_encoder;
- if (!encoder || WARN_ON(!connector->state->crtc)) {
- if (connector->force > DRM_FORCE_OFF)
- goto bail;
-
- DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
- connector->name);
- enabled[i] = false;
- conn_configured |= BIT(i);
- continue;
- }
-
- num_connectors_enabled++;
-
- new_crtc = drm_fb_helper_crtc(fb_helper, connector->state->crtc);
-
- /*
- * Make sure we're not trying to drive multiple connectors
- * with a single CRTC, since our cloning support may not
- * match the BIOS.
- */
- for (j = 0; j < count; j++) {
- if (crtcs[j] == new_crtc) {
- DRM_DEBUG_KMS("fallback: cloned configuration\n");
- goto bail;
- }
- }
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
- connector->name);
-
- /* go for command line mode first */
- modes[i] = drm_pick_cmdline_mode(fb_conn);
-
- /* try for preferred next */
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
- connector->name, connector->has_tile);
- modes[i] = drm_has_preferred_mode(fb_conn, width,
- height);
- }
-
- /* No preferred mode marked by the EDID? Are there any modes? */
- if (!modes[i] && !list_empty(&connector->modes)) {
- DRM_DEBUG_KMS("using first mode listed on connector %s\n",
- connector->name);
- modes[i] = list_first_entry(&connector->modes,
- struct drm_display_mode,
- head);
- }
-
- /* last resort: use current mode */
- if (!modes[i]) {
- /*
- * IMPORTANT: We want to use the adjusted mode (i.e.
- * after the panel fitter upscaling) as the initial
- * config, not the input mode, which is what crtc->mode
- * usually contains. But since our current
- * code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly.
- *
- * This is crtc->mode and not crtc->state->mode for the
- * fastboot check to work correctly.
- */
- DRM_DEBUG_KMS("looking for current mode on connector %s\n",
- connector->name);
- modes[i] = &connector->state->crtc->mode;
- }
- crtcs[i] = new_crtc;
-
- DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
- connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
-
- fallback = false;
- conn_configured |= BIT(i);
- }
-
- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
- goto retry;
-
- /*
- * If the BIOS didn't enable everything it could, fall back to have the
- * same user experiencing of lighting up as much as possible like the
- * fbdev helper library.
- */
- if (num_connectors_enabled != num_connectors_detected &&
- num_connectors_enabled < dev->mode_config.num_crtc) {
- DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
- DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
- num_connectors_detected);
- fallback = true;
- }
-
- if (fallback) {
-bail:
- DRM_DEBUG_KMS("Not using firmware configuration\n");
- memcpy(enabled, save_enabled, count);
- ret = false;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- kfree(save_enabled);
- return ret;
-}
-
-static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
- u32 width, u32 height)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_fb_helper_crtc **crtcs;
- struct drm_display_mode **modes;
- struct drm_fb_offset *offsets;
- bool *enabled;
- int i;
-
- DRM_DEBUG_KMS("\n");
- /* prevent concurrent modification of connector_count by hotplug */
- lockdep_assert_held(&fb_helper->lock);
-
- crtcs = kcalloc(fb_helper->connector_count,
- sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
- modes = kcalloc(fb_helper->connector_count,
- sizeof(struct drm_display_mode *), GFP_KERNEL);
- offsets = kcalloc(fb_helper->connector_count,
- sizeof(struct drm_fb_offset), GFP_KERNEL);
- enabled = kcalloc(fb_helper->connector_count,
- sizeof(bool), GFP_KERNEL);
- if (!crtcs || !modes || !enabled || !offsets) {
- DRM_ERROR("Memory allocation failed\n");
- goto out;
- }
-
- mutex_lock(&fb_helper->dev->mode_config.mutex);
- if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0)
- DRM_DEBUG_KMS("No connectors reported connected with modes\n");
- drm_enable_connectors(fb_helper, enabled);
-
- if (!drm_fb_helper_firmware_config(fb_helper, crtcs, modes, offsets,
- enabled, width, height)) {
- memset(modes, 0, fb_helper->connector_count*sizeof(modes[0]));
- memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0]));
- memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0]));
-
- if (!drm_target_cloned(fb_helper, modes, offsets,
- enabled, width, height) &&
- !drm_target_preferred(fb_helper, modes, offsets,
- enabled, width, height))
- DRM_ERROR("Unable to find initial modes\n");
-
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
- width, height);
-
- drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
- }
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
-
- /* need to set the modesets up here for use later */
- /* fill out the connector<->crtc mappings into the modesets */
- for (i = 0; i < fb_helper->crtc_count; i++)
- drm_fb_helper_modeset_release(fb_helper,
- &fb_helper->crtc_info[i].mode_set);
-
- fb_helper->sw_rotations = 0;
- drm_fb_helper_for_each_connector(fb_helper, i) {
- struct drm_display_mode *mode = modes[i];
- struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
- struct drm_fb_offset *offset = &offsets[i];
-
- if (mode && fb_crtc) {
- struct drm_mode_set *modeset = &fb_crtc->mode_set;
- struct drm_connector *connector =
- fb_helper->connector_info[i]->connector;
-
- DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
- mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
-
- fb_crtc->desired_mode = mode;
- fb_crtc->x = offset->x;
- fb_crtc->y = offset->y;
- modeset->mode = drm_mode_duplicate(dev,
- fb_crtc->desired_mode);
- drm_connector_get(connector);
- drm_setup_crtc_rotation(fb_helper, fb_crtc, connector);
- modeset->connectors[modeset->num_connectors++] = connector;
- modeset->x = offset->x;
- modeset->y = offset->y;
- }
- }
-out:
- kfree(crtcs);
- kfree(modes);
- kfree(offsets);
- kfree(enabled);
-}
-
/*
* This is a continuation of drm_setup_crtcs() that sets up anything related
* to the framebuffer. During initialization, drm_setup_crtcs() is called before
@@ -2847,17 +1708,30 @@ out:
*/
static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper)
{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_connector_list_iter conn_iter;
struct fb_info *info = fb_helper->fbdev;
- int i;
+ unsigned int rotation, sw_rotations = 0;
+ struct drm_connector *connector;
+ struct drm_mode_set *modeset;
+
+ mutex_lock(&client->modeset_mutex);
+ drm_client_for_each_modeset(modeset, client) {
+ if (!modeset->num_connectors)
+ continue;
- for (i = 0; i < fb_helper->crtc_count; i++)
- if (fb_helper->crtc_info[i].mode_set.num_connectors)
- fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+ modeset->fb = fb_helper->fb;
+
+ if (drm_client_panel_rotation(modeset, &rotation))
+ /* Rotating in hardware, fbcon should not rotate */
+ sw_rotations |= DRM_MODE_ROTATE_0;
+ else
+ sw_rotations |= rotation;
+ }
+ mutex_unlock(&client->modeset_mutex);
- mutex_lock(&fb_helper->dev->mode_config.mutex);
- drm_fb_helper_for_each_connector(fb_helper, i) {
- struct drm_connector *connector =
- fb_helper->connector_info[i]->connector;
+ drm_connector_list_iter_begin(fb_helper->dev, &conn_iter);
+ drm_client_for_each_connector_iter(connector, &conn_iter) {
/* use first connected connector for the physical dimensions */
if (connector->status == connector_status_connected) {
@@ -2866,9 +1740,9 @@ static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper)
break;
}
}
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ drm_connector_list_iter_end(&conn_iter);
- switch (fb_helper->sw_rotations) {
+ switch (sw_rotations) {
case DRM_MODE_ROTATE_0:
info->fbcon_rotate_hint = FB_ROTATE_UR;
break;
@@ -2904,7 +1778,7 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
- drm_setup_crtcs(fb_helper, width, height);
+ drm_client_modeset_probe(&fb_helper->client, width, height);
ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
if (ret < 0) {
if (ret == -EAGAIN) {
@@ -3041,15 +1915,17 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
return err;
}
- if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
+ if (!fb_helper->fb || !drm_master_internal_acquire(fb_helper->dev)) {
fb_helper->delayed_hotplug = true;
mutex_unlock(&fb_helper->lock);
return err;
}
+ drm_master_internal_release(fb_helper->dev);
+
DRM_DEBUG_KMS("\n");
- drm_setup_crtcs(fb_helper, fb_helper->fb->width, fb_helper->fb->height);
+ drm_client_modeset_probe(&fb_helper->client, fb_helper->fb->width, fb_helper->fb->height);
drm_setup_crtcs_fb(fb_helper);
mutex_unlock(&fb_helper->lock);
@@ -3066,8 +1942,7 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
* @funcs: fbdev helper functions
* @preferred_bpp: Preferred bits per pixel for the device.
* @dev->mode_config.preferred_depth is used if this is zero.
- * @max_conn_count: Maximum number of connectors.
- * @dev->mode_config.num_connector is used if this is zero.
+ * @max_conn_count: Maximum number of connectors (not used)
*
* This function sets up fbdev emulation and registers fbdev for access by
* userspace. If all connectors are disconnected, setup is deferred to the next
@@ -3095,27 +1970,14 @@ int drm_fb_helper_fbdev_setup(struct drm_device *dev,
if (!preferred_bpp)
preferred_bpp = 32;
- if (!max_conn_count)
- max_conn_count = dev->mode_config.num_connector;
- if (!max_conn_count) {
- DRM_DEV_ERROR(dev->dev, "fbdev: No connectors\n");
- return -EINVAL;
- }
-
drm_fb_helper_prepare(dev, fb_helper, funcs);
- ret = drm_fb_helper_init(dev, fb_helper, max_conn_count);
+ ret = drm_fb_helper_init(dev, fb_helper, 0);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev, "fbdev: Failed to initialize (ret=%d)\n", ret);
return ret;
}
- ret = drm_fb_helper_single_add_all_connectors(fb_helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "fbdev: Failed to add connectors (ret=%d)\n", ret);
- goto err_drm_fb_helper_fini;
- }
-
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
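With the connector bookkeeping moved into the DRM client, max_conn_count is
now ignored; a minimal setup sketch under that assumption, where
my_fb_helper_funcs is a hypothetical driver-provided vtable:

	static int my_fbdev_setup(struct drm_device *dev)
	{
		struct drm_fb_helper *fb_helper;

		fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
		if (!fb_helper)
			return -ENOMEM;

		/* preferred_bpp = 32; max_conn_count is no longer used */
		return drm_fb_helper_fbdev_setup(dev, fb_helper,
						 &my_fb_helper_funcs, 32, 0);
	}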
@@ -3417,14 +2279,10 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
- ret = drm_fb_helper_init(dev, fb_helper, dev->mode_config.num_connector);
+ ret = drm_fb_helper_init(dev, fb_helper, 0);
if (ret)
goto err;
- ret = drm_fb_helper_single_add_all_connectors(fb_helper);
- if (ret)
- goto err_cleanup;
-
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 233f114d2186..754af25fe255 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -31,17 +31,20 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/dma-fence.h>
+#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <drm/drm_client.h>
+#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drmP.h>
+#include <drm/drm_print.h>
-#include "drm_legacy.h"
-#include "drm_internal.h"
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+#include "drm_legacy.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -100,8 +103,6 @@ DEFINE_MUTEX(drm_global_mutex);
* :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
*/
-static int drm_open_helper(struct file *filp, struct drm_minor *minor);
-
/**
* drm_file_alloc - allocate file context
* @minor: minor to allocate on
@@ -273,76 +274,6 @@ static void drm_close_helper(struct file *filp)
drm_file_free(file_priv);
}
-static int drm_setup(struct drm_device * dev)
-{
- int ret;
-
- if (dev->driver->firstopen &&
- drm_core_check_feature(dev, DRIVER_LEGACY)) {
- ret = dev->driver->firstopen(dev);
- if (ret != 0)
- return ret;
- }
-
- ret = drm_legacy_dma_setup(dev);
- if (ret < 0)
- return ret;
-
-
- DRM_DEBUG("\n");
- return 0;
-}
-
-/**
- * drm_open - open method for DRM file
- * @inode: device inode
- * @filp: file pointer.
- *
- * This function must be used by drivers as their &file_operations.open method.
- * It looks up the correct DRM device and instantiates all the per-file
- * resources for it. It also calls the &drm_driver.open driver callback.
- *
- * RETURNS:
- *
- * 0 on success or negative errno value on falure.
- */
-int drm_open(struct inode *inode, struct file *filp)
-{
- struct drm_device *dev;
- struct drm_minor *minor;
- int retcode;
- int need_setup = 0;
-
- minor = drm_minor_acquire(iminor(inode));
- if (IS_ERR(minor))
- return PTR_ERR(minor);
-
- dev = minor->dev;
- if (!dev->open_count++)
- need_setup = 1;
-
- /* share address_space across all char-devs of a single device */
- filp->f_mapping = dev->anon_inode->i_mapping;
-
- retcode = drm_open_helper(filp, minor);
- if (retcode)
- goto err_undo;
- if (need_setup) {
- retcode = drm_setup(dev);
- if (retcode) {
- drm_close_helper(filp);
- goto err_undo;
- }
- }
- return 0;
-
-err_undo:
- dev->open_count--;
- drm_minor_release(minor);
- return retcode;
-}
-EXPORT_SYMBOL(drm_open);
-
/*
* Check whether DRI will run on this CPU.
*
@@ -424,6 +355,56 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
return 0;
}
+/**
+ * drm_open - open method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
+ *
+ * This function must be used by drivers as their &file_operations.open method.
+ * It looks up the correct DRM device and instantiates all the per-file
+ * resources for it. It also calls the &drm_driver.open driver callback.
+ *
+ * RETURNS:
+ *
+ * 0 on success or negative errno value on failure.
+ */
+int drm_open(struct inode *inode, struct file *filp)
+{
+ struct drm_device *dev;
+ struct drm_minor *minor;
+ int retcode;
+ int need_setup = 0;
+
+ minor = drm_minor_acquire(iminor(inode));
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
+
+ dev = minor->dev;
+ if (!dev->open_count++)
+ need_setup = 1;
+
+ /* share address_space across all char-devs of a single device */
+ filp->f_mapping = dev->anon_inode->i_mapping;
+
+ retcode = drm_open_helper(filp, minor);
+ if (retcode)
+ goto err_undo;
+ if (need_setup) {
+ retcode = drm_legacy_setup(dev);
+ if (retcode) {
+ drm_close_helper(filp);
+ goto err_undo;
+ }
+ }
+ return 0;
+
+err_undo:
+ dev->open_count--;
+ drm_minor_release(minor);
+ return retcode;
+}
+EXPORT_SYMBOL(drm_open);
+
void drm_lastclose(struct drm_device * dev)
{
DRM_DEBUG("\n");
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index 3da3bf5af405..060b753881a2 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -21,9 +21,11 @@
* SOFTWARE.
*/
-#include <drm/drmP.h>
-#include <drm/drm_util.h>
+#include <linux/slab.h>
+
#include <drm/drm_flip_work.h>
+#include <drm/drm_print.h>
+#include <drm/drm_util.h>
/**
* drm_flip_work_allocate_task - allocate a flip-work task
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index a18da35145b7..0897cb9aeaff 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -36,7 +36,7 @@ static unsigned int clip_offset(struct drm_rect *clip,
void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
struct drm_rect *clip)
{
- unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ unsigned int cpp = fb->format->cpp[0];
size_t len = (clip->x2 - clip->x1) * cpp;
unsigned int y, lines = clip->y2 - clip->y1;
@@ -63,7 +63,7 @@ void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
struct drm_framebuffer *fb,
struct drm_rect *clip)
{
- unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ unsigned int cpp = fb->format->cpp[0];
unsigned int offset = clip_offset(clip, fb->pitches[0], cpp);
size_t len = (clip->x2 - clip->x1) * cpp;
unsigned int y, lines = clip->y2 - clip->y1;
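Outside the helper, the same per-plane byte count is available through
drm_format_info(); a hedged sketch:

	#include <drm/drm_fourcc.h>

	/* bytes per pixel of the first plane for a given fourcc code */
	static unsigned int first_plane_cpp(u32 format)
	{
		const struct drm_format_info *info = drm_format_info(format);

		return info ? info->cpp[0] : 0;
	}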
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 6ea55fb4526d..c630064ccf41 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -27,7 +27,7 @@
#include <linux/export.h>
#include <linux/kernel.h>
-#include <drm/drmP.h>
+#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
static char printable_char(int c)
@@ -333,124 +333,6 @@ drm_get_format_info(struct drm_device *dev,
EXPORT_SYMBOL(drm_get_format_info);
/**
- * drm_format_num_planes - get the number of planes for format
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The number of planes used by the specified pixel format.
- */
-int drm_format_num_planes(uint32_t format)
-{
- const struct drm_format_info *info;
-
- info = drm_format_info(format);
- return info ? info->num_planes : 1;
-}
-EXPORT_SYMBOL(drm_format_num_planes);
-
-/**
- * drm_format_plane_cpp - determine the bytes per pixel value
- * @format: pixel format (DRM_FORMAT_*)
- * @plane: plane index
- *
- * Returns:
- * The bytes per pixel value for the specified plane.
- */
-int drm_format_plane_cpp(uint32_t format, int plane)
-{
- const struct drm_format_info *info;
-
- info = drm_format_info(format);
- if (!info || plane >= info->num_planes)
- return 0;
-
- return info->cpp[plane];
-}
-EXPORT_SYMBOL(drm_format_plane_cpp);
-
-/**
- * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The horizontal chroma subsampling factor for the
- * specified pixel format.
- */
-int drm_format_horz_chroma_subsampling(uint32_t format)
-{
- const struct drm_format_info *info;
-
- info = drm_format_info(format);
- return info ? info->hsub : 1;
-}
-EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
-
-/**
- * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The vertical chroma subsampling factor for the
- * specified pixel format.
- */
-int drm_format_vert_chroma_subsampling(uint32_t format)
-{
- const struct drm_format_info *info;
-
- info = drm_format_info(format);
- return info ? info->vsub : 1;
-}
-EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
-
-/**
- * drm_format_plane_width - width of the plane given the first plane
- * @width: width of the first plane
- * @format: pixel format
- * @plane: plane index
- *
- * Returns:
- * The width of @plane, given that the width of the first plane is @width.
- */
-int drm_format_plane_width(int width, uint32_t format, int plane)
-{
- const struct drm_format_info *info;
-
- info = drm_format_info(format);
- if (!info || plane >= info->num_planes)
- return 0;
-
- if (plane == 0)
- return width;
-
- return width / info->hsub;
-}
-EXPORT_SYMBOL(drm_format_plane_width);
-
-/**
- * drm_format_plane_height - height of the plane given the first plane
- * @height: height of the first plane
- * @format: pixel format
- * @plane: plane index
- *
- * Returns:
- * The height of @plane, given that the height of the first plane is @height.
- */
-int drm_format_plane_height(int height, uint32_t format, int plane)
-{
- const struct drm_format_info *info;
-
- info = drm_format_info(format);
- if (!info || plane >= info->num_planes)
- return 0;
-
- if (plane == 0)
- return height;
-
- return height / info->vsub;
-}
-EXPORT_SYMBOL(drm_format_plane_height);
-
-/**
* drm_format_info_block_width - width in pixels of block.
* @info: pixel format info
* @plane: plane index
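Callers of the removed wrappers are expected to look up &struct
drm_format_info directly; equivalent open-coded replacements, as a sketch:

	const struct drm_format_info *info = drm_format_info(format);

	/* was drm_format_num_planes(format) */
	int num_planes = info ? info->num_planes : 1;
	/* was drm_format_plane_cpp(format, plane) */
	int cpp = (info && plane < info->num_planes) ? info->cpp[plane] : 0;
	/* was drm_format_plane_width(width, format, plane), for plane > 0 */
	int plane_width = width / info->hsub;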
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index d8d75e25f6fb..0b72468e8131 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -21,16 +21,21 @@
*/
#include <linux/export.h>
-#include <drm/drmP.h>
-#include <drm/drm_auth.h>
-#include <drm/drm_framebuffer.h>
+#include <linux/uaccess.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_util.h>
-#include "drm_internal.h"
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
/**
* DOC: overview
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 50de138c89e0..8a55f71325b1 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -38,10 +38,14 @@
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>
-#include <drm/drmP.h>
-#include <drm/drm_vma_manager.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
+#include <drm/drm_vma_manager.h>
+
#include "drm_internal.h"
/** @file drm_gem.c
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index a5c8850079f1..12e98fb28229 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -9,15 +9,16 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
*/
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
-#include <drm/drmP.h>
#include <drm/drm.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index ed985d9b6010..8fcbabf02dfd 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -10,7 +10,6 @@
#include <linux/reservation.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
@@ -281,6 +280,9 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
* There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
* gem based framebuffer drivers which have their buffers always pinned in
* memory.
+ *
+ * See drm_atomic_set_fence_for_plane() for a discussion of implicit and
+ * explicit fencing in atomic modeset updates.
*/
int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
@@ -311,6 +313,9 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
* &dma_buf attached, extracts the exclusive fence and attaches it to plane
* state for the atomic helper to wait on. Drivers can use this as their
* &drm_simple_display_pipe_funcs.prepare_fb callback.
+ *
+ * See drm_atomic_set_fence_for_plane() for a discussion of implicit and
+ * explicit fencing in atomic modeset updates.
*/
int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
new file mode 100644
index 000000000000..4de782ca26b2
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_vram_mm_helper.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides a GEM buffer object that is backed by video RAM
+ * (VRAM). It can be used for framebuffer devices with dedicated memory.
+ */
+
+/*
+ * Buffer-objects helpers
+ */
+
+static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
+{
+ /* We got here via ttm_bo_put(), which means that the
+ * TTM buffer object in 'bo' has already been cleaned
+ * up; only release the GEM object.
+ */
+ drm_gem_object_release(&gbo->gem);
+}
+
+static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
+{
+ drm_gem_vram_cleanup(gbo);
+ kfree(gbo);
+}
+
+static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
+{
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
+
+ drm_gem_vram_destroy(gbo);
+}
+
+static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
+ unsigned long pl_flag)
+{
+ unsigned int i;
+ unsigned int c = 0;
+
+ gbo->placement.placement = gbo->placements;
+ gbo->placement.busy_placement = gbo->placements;
+
+ if (pl_flag & TTM_PL_FLAG_VRAM)
+ gbo->placements[c++].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
+
+ if (pl_flag & TTM_PL_FLAG_SYSTEM)
+ gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
+ TTM_PL_FLAG_SYSTEM;
+
+ if (!c)
+ gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
+ TTM_PL_FLAG_SYSTEM;
+
+ gbo->placement.num_placement = c;
+ gbo->placement.num_busy_placement = c;
+
+ for (i = 0; i < c; ++i) {
+ gbo->placements[i].fpfn = 0;
+ gbo->placements[i].lpfn = 0;
+ }
+}
+
+static int drm_gem_vram_init(struct drm_device *dev,
+ struct ttm_bo_device *bdev,
+ struct drm_gem_vram_object *gbo,
+ size_t size, unsigned long pg_align,
+ bool interruptible)
+{
+ int ret;
+ size_t acc_size;
+
+ ret = drm_gem_object_init(dev, &gbo->gem, size);
+ if (ret)
+ return ret;
+
+ acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
+
+ gbo->bo.bdev = bdev;
+ drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
+ &gbo->placement, pg_align, interruptible, acc_size,
+ NULL, NULL, ttm_buffer_object_destroy);
+ if (ret)
+ goto err_drm_gem_object_release;
+
+ return 0;
+
+err_drm_gem_object_release:
+ drm_gem_object_release(&gbo->gem);
+ return ret;
+}
+
+/**
+ * drm_gem_vram_create() - Creates a VRAM-backed GEM object
+ * @dev: the DRM device
+ * @bdev: the TTM BO device backing the object
+ * @size: the buffer size in bytes
+ * @pg_align: the buffer's alignment in multiples of the page size
+ * @interruptible: sleep interruptible if waiting for memory
+ *
+ * Returns:
+ * A new instance of &struct drm_gem_vram_object on success, or
+ * an ERR_PTR()-encoded error code otherwise.
+ */
+struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
+ struct ttm_bo_device *bdev,
+ size_t size,
+ unsigned long pg_align,
+ bool interruptible)
+{
+ struct drm_gem_vram_object *gbo;
+ int ret;
+
+ gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
+ if (!gbo)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
+ if (ret < 0)
+ goto err_kfree;
+
+ return gbo;
+
+err_kfree:
+ kfree(gbo);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_vram_create);
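Typical object lifetime, as a sketch (bdev is assumed to be the driver's
existing &struct ttm_bo_device):

	struct drm_gem_vram_object *gbo;

	gbo = drm_gem_vram_create(dev, bdev, 64 * PAGE_SIZE, 0, false);
	if (IS_ERR(gbo))
		return PTR_ERR(gbo);
	/* ... use the buffer object ... */
	drm_gem_vram_put(gbo);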
+
+/**
+ * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
+ * @gbo: the GEM VRAM object
+ *
+ * See ttm_bo_put() for more information.
+ */
+void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
+{
+ ttm_bo_put(&gbo->bo);
+}
+EXPORT_SYMBOL(drm_gem_vram_put);
+
+/**
+ * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
+ * @gbo: the GEM VRAM object
+ *
+ * See drm_vma_node_offset_addr() for more information.
+ *
+ * Returns:
+ * The buffer object's offset for userspace mappings on success, or
+ * 0 if no offset is allocated.
+ */
+u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
+{
+ return drm_vma_node_offset_addr(&gbo->bo.vma_node);
+}
+EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
+
+/**
+ * drm_gem_vram_offset() - \
+ Returns a GEM VRAM object's offset in video memory
+ * @gbo: the GEM VRAM object
+ *
+ * This function returns the buffer object's offset in the device's video
+ * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
+ *
+ * Returns:
+ * The buffer object's offset in video memory on success, or
+ * a negative errno code otherwise.
+ */
+s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
+{
+ if (WARN_ON_ONCE(!gbo->pin_count))
+ return (s64)-ENODEV;
+ return gbo->bo.offset;
+}
+EXPORT_SYMBOL(drm_gem_vram_offset);
+
+/**
+ * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
+ * @gbo: the GEM VRAM object
+ * @pl_flag: a bitmask of possible memory regions
+ *
+ * Pinning a buffer object ensures that it is not evicted from
+ * a memory region. A pinned buffer object has to be unpinned before
+ * it can be pinned to another region. If the pl_flag argument is 0,
+ * the buffer is pinned at its current location (video RAM or system
+ * memory).
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
+{
+ int i, ret;
+ struct ttm_operation_ctx ctx = { false, false };
+
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
+ if (ret < 0)
+ return ret;
+
+ if (gbo->pin_count)
+ goto out;
+
+ if (pl_flag)
+ drm_gem_vram_placement(gbo, pl_flag);
+
+ for (i = 0; i < gbo->placement.num_placement; ++i)
+ gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
+ if (ret < 0)
+ goto err_ttm_bo_unreserve;
+
+out:
+ ++gbo->pin_count;
+ ttm_bo_unreserve(&gbo->bo);
+
+ return 0;
+
+err_ttm_bo_unreserve:
+ ttm_bo_unreserve(&gbo->bo);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_pin);
+
+/**
+ * drm_gem_vram_unpin() - Unpins a GEM VRAM object
+ * @gbo: the GEM VRAM object
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+{
+ int i, ret;
+ struct ttm_operation_ctx ctx = { false, false };
+
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
+ if (ret < 0)
+ return ret;
+
+ if (WARN_ON_ONCE(!gbo->pin_count))
+ goto out;
+
+ --gbo->pin_count;
+ if (gbo->pin_count)
+ goto out;
+
+ for (i = 0; i < gbo->placement.num_placement ; ++i)
+ gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
+ if (ret < 0)
+ goto err_ttm_bo_unreserve;
+
+out:
+ ttm_bo_unreserve(&gbo->bo);
+
+ return 0;
+
+err_ttm_bo_unreserve:
+ ttm_bo_unreserve(&gbo->bo);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_unpin);
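A scanout path would typically pin to VRAM, read the offset while pinned and
unpin once the buffer is no longer displayed; a hedged sketch:

	int ret;
	s64 off;

	ret = drm_gem_vram_pin(gbo, TTM_PL_FLAG_VRAM);
	if (ret)
		return ret;

	off = drm_gem_vram_offset(gbo); /* only valid while pinned */
	if (off < 0) {
		drm_gem_vram_unpin(gbo);
		return (int)off;
	}
	/* program the scanout base from 'off'; when done: */
	drm_gem_vram_unpin(gbo);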
+
+/**
+ * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
+ * @gbo: the GEM VRAM object
+ * @map: establish a mapping if necessary
+ * @is_iomem: returns true if the mapped memory is I/O memory, or false \
+ otherwise; can be NULL
+ *
+ * This function maps the buffer object into the kernel's address space
+ * or returns the current mapping. If the parameter map is false, the
+ * function only queries the current mapping, but does not establish a
+ * new one.
+ *
+ * Returns:
+ * The buffer's virtual address if mapped, or
+ * NULL if not mapped, or
+ * an ERR_PTR()-encoded error code otherwise.
+ */
+void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
+ bool *is_iomem)
+{
+ int ret;
+ struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+
+ if (kmap->virtual || !map)
+ goto out;
+
+ ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
+ if (ret)
+ return ERR_PTR(ret);
+
+out:
+ if (!is_iomem)
+ return kmap->virtual;
+ if (!kmap->virtual) {
+ *is_iomem = false;
+ return NULL;
+ }
+ return ttm_kmap_obj_virtual(kmap, is_iomem);
+}
+EXPORT_SYMBOL(drm_gem_vram_kmap);
+
+/**
+ * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
+ * @gbo: the GEM VRAM object
+ */
+void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
+{
+ struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+
+ if (!kmap->virtual)
+ return;
+
+ ttm_bo_kunmap(kmap);
+ kmap->virtual = NULL;
+}
+EXPORT_SYMBOL(drm_gem_vram_kunmap);
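Updating buffer contents from the CPU follows a kmap/kunmap pattern; a
sketch, where the memcpy_toio() branch for I/O memory is an assumption about
the caller's needs:

	bool is_iomem;
	void *dst = drm_gem_vram_kmap(gbo, true, &is_iomem);

	if (IS_ERR(dst))
		return PTR_ERR(dst);
	if (is_iomem)
		memcpy_toio((void __iomem *)dst, src, len);
	else
		memcpy(dst, src, len);
	drm_gem_vram_kunmap(gbo);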
+
+/**
+ * drm_gem_vram_fill_create_dumb() - \
+ Helper for implementing &struct drm_driver.dumb_create
+ * @file: the DRM file
+ * @dev: the DRM device
+ * @bdev: the TTM BO device managing the buffer object
+ * @pg_align: the buffer's alignment in multiples of the page size
+ * @interruptible: sleep interruptible if waiting for memory
+ * @args: the arguments as provided to \
+ &struct drm_driver.dumb_create
+ *
+ * This helper function fills &struct drm_mode_create_dumb, which is used
+ * by &struct drm_driver.dumb_create. Implementations of this interface
+ * should forward their arguments to this helper, plus the driver-specific
+ * parameters.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_gem_vram_fill_create_dumb(struct drm_file *file,
+ struct drm_device *dev,
+ struct ttm_bo_device *bdev,
+ unsigned long pg_align,
+ bool interruptible,
+ struct drm_mode_create_dumb *args)
+{
+ size_t pitch, size;
+ struct drm_gem_vram_object *gbo;
+ int ret;
+ u32 handle;
+
+ pitch = args->width * ((args->bpp + 7) / 8);
+ size = pitch * args->height;
+
+ size = roundup(size, PAGE_SIZE);
+ if (!size)
+ return -EINVAL;
+
+ gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
+ if (IS_ERR(gbo))
+ return PTR_ERR(gbo);
+
+ ret = drm_gem_handle_create(file, &gbo->gem, &handle);
+ if (ret)
+ goto err_drm_gem_object_put_unlocked;
+
+ drm_gem_object_put_unlocked(&gbo->gem);
+
+ args->pitch = pitch;
+ args->size = size;
+ args->handle = handle;
+
+ return 0;
+
+err_drm_gem_object_put_unlocked:
+ drm_gem_object_put_unlocked(&gbo->gem);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
+
+/*
+ * Helpers for struct ttm_bo_driver
+ */
+
+static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
+{
+ return (bo->destroy == ttm_buffer_object_destroy);
+}
+
+/**
+ * drm_gem_vram_bo_driver_evict_flags() - \
+ Implements &struct ttm_bo_driver.evict_flags
+ * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
+ * @pl: TTM placement information.
+ */
+void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *pl)
+{
+ struct drm_gem_vram_object *gbo;
+
+ /* TTM may pass BOs that are not GEM VRAM BOs. */
+ if (!drm_is_gem_vram(bo))
+ return;
+
+ gbo = drm_gem_vram_of_bo(bo);
+ drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
+ *pl = gbo->placement;
+}
+EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags);
+
+/**
+ * drm_gem_vram_bo_driver_verify_access() - \
+ Implements &struct ttm_bo_driver.verify_access
+ * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
+ * @filp: File pointer.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
+
+ return drm_vma_node_verify_access(&gbo->gem.vma_node,
+ filp->private_data);
+}
+EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access);
+
+/*
+ * drm_gem_vram_mm_funcs - Functions for &struct drm_vram_mm
+ *
+ * Most users of &struct drm_gem_vram_object will also use
+ * &struct drm_vram_mm. This instance of &struct drm_vram_mm_funcs
+ * can be used to connect both.
+ */
+const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = {
+ .evict_flags = drm_gem_vram_bo_driver_evict_flags,
+ .verify_access = drm_gem_vram_bo_driver_verify_access
+};
+EXPORT_SYMBOL(drm_gem_vram_mm_funcs);
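A driver passes this table when setting up its VRAM manager; a sketch
assuming the companion helper's drm_vram_helper_alloc_mm() entry point and a
PCI device whose BAR 0 is the video memory:

	struct drm_vram_mm *mm;

	mm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0),
				      pci_resource_len(pdev, 0),
				      &drm_gem_vram_mm_funcs);
	if (IS_ERR(mm))
		return PTR_ERR(mm);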
+
+/*
+ * Helpers for struct drm_driver
+ */
+
+/**
+ * drm_gem_vram_driver_gem_free_object_unlocked() - \
+ Implements &struct drm_driver.gem_free_object_unlocked
+ * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
+ */
+void drm_gem_vram_driver_gem_free_object_unlocked(struct drm_gem_object *gem)
+{
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
+
+ drm_gem_vram_put(gbo);
+}
+EXPORT_SYMBOL(drm_gem_vram_driver_gem_free_object_unlocked);
+
+/**
+ * drm_gem_vram_driver_dumb_create() - \
+ Implements &struct drm_driver.dumb_create
+ * @file: the DRM file
+ * @dev: the DRM device
+ * @args: the arguments as provided to \
+ &struct drm_driver.dumb_create
+ *
+ * This function requires the driver to use &drm_device.vram_mm for its
+ * instance of VRAM MM.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_gem_vram_driver_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
+ return -EINVAL;
+
+ return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
+ false, args);
+}
+EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
+
+/**
+ * drm_gem_vram_driver_dumb_mmap_offset() - \
+ Implements &struct drm_driver.dumb_map_offset
+ * @file: DRM file pointer.
+ * @dev: DRM device.
+ * @handle: GEM handle
+ * @offset: Returns the mapping's memory offset on success
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *gem;
+ struct drm_gem_vram_object *gbo;
+
+ gem = drm_gem_object_lookup(file, handle);
+ if (!gem)
+ return -ENOENT;
+
+ gbo = drm_gem_vram_of_gem(gem);
+ *offset = drm_gem_vram_mmap_offset(gbo);
+
+ drm_gem_object_put_unlocked(gem);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
+
+/*
+ * PRIME helpers for struct drm_driver
+ */
+
+/**
+ * drm_gem_vram_driver_gem_prime_pin() - \
+ Implements &struct drm_driver.gem_prime_pin
+ * @gem: The GEM object to pin
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *gem)
+{
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
+
+	/* Fbdev console emulation is the primary use case for these
+	 * PRIME helpers. It may involve updating a hardware buffer from
+	 * a shadow FB. We pin the buffer to its current location
+ * (either video RAM or system memory) to prevent it from
+ * being relocated during the update operation. If you require
+ * the buffer to be pinned to VRAM, implement a callback that
+ * sets the flags accordingly.
+ */
+ return drm_gem_vram_pin(gbo, 0);
+}
+EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_pin);
+
+/**
+ * drm_gem_vram_driver_gem_prime_unpin() - \
+ Implements &struct drm_driver.gem_prime_unpin
+ * @gem: The GEM object to unpin
+ */
+void drm_gem_vram_driver_gem_prime_unpin(struct drm_gem_object *gem)
+{
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
+
+ drm_gem_vram_unpin(gbo);
+}
+EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_unpin);
+
+/**
+ * drm_gem_vram_driver_gem_prime_vmap() - \
+ Implements &struct drm_driver.gem_prime_vmap
+ * @gem: The GEM object to map
+ *
+ * Returns:
+ * The buffer's virtual address on success, or
+ * NULL otherwise.
+ */
+void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *gem)
+{
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
+ int ret;
+ void *base;
+
+ ret = drm_gem_vram_pin(gbo, 0);
+ if (ret)
+ return NULL;
+ base = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ drm_gem_vram_unpin(gbo);
+ return NULL;
+ }
+ return base;
+}
+EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vmap);
+
+/**
+ * drm_gem_vram_driver_gem_prime_vunmap() - \
+ Implements &struct drm_driver.gem_prime_vunmap
+ * @gem: The GEM object to unmap
+ * @vaddr: The mapping's base address
+ */
+void drm_gem_vram_driver_gem_prime_vunmap(struct drm_gem_object *gem,
+ void *vaddr)
+{
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
+
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
+}
+EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vunmap);
+
+/**
+ * drm_gem_vram_driver_gem_prime_mmap() - \
+ Implements &struct drm_driver.gem_prime_mmap
+ * @gem: The GEM object to map
+ * @vma: The VMA describing the mapping
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma)
+{
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
+
+ gbo->gem.vma_node.vm_node.start = gbo->bo.vma_node.vm_node.start;
+ return drm_gem_prime_mmap(gem, vma);
+}
+EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_mmap);
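Taken together, the helpers slot into &struct drm_driver roughly as follows;
a sketch, not a complete driver:

	static struct drm_driver my_vram_driver = {
		/* ... */
		.gem_free_object_unlocked =
			drm_gem_vram_driver_gem_free_object_unlocked,
		.dumb_create	  = drm_gem_vram_driver_dumb_create,
		.dumb_map_offset  = drm_gem_vram_driver_dumb_mmap_offset,
		.gem_prime_pin	  = drm_gem_vram_driver_gem_prime_pin,
		.gem_prime_unpin  = drm_gem_vram_driver_gem_prime_unpin,
		.gem_prime_vmap	  = drm_gem_vram_driver_gem_prime_vmap,
		.gem_prime_vunmap = drm_gem_vram_driver_gem_prime_vunmap,
		.gem_prime_mmap	  = drm_gem_vram_driver_gem_prime_mmap,
	};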
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index c92b00d42ece..c50fa6f0709f 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -32,11 +32,15 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_hashtab.h>
+#include <linux/export.h>
#include <linux/hash.h>
+#include <linux/mm.h>
+#include <linux/rculist.h>
#include <linux/slab.h>
-#include <linux/export.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_hashtab.h>
+#include <drm/drm_print.h>
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
new file mode 100644
index 000000000000..cd837bd409f7
--- /dev/null
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Intel Corporation.
+ *
+ * Authors:
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+
+#include <drm/drm_hdcp.h>
+#include <drm/drm_sysfs.h>
+#include <drm/drm_print.h>
+#include <drm/drm_device.h>
+#include <drm/drm_property.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_connector.h>
+
+#include "drm_internal.h"
+
+static struct hdcp_srm {
+ u32 revoked_ksv_cnt;
+ u8 *revoked_ksv_list;
+
+	/* Mutex to protect the above struct members */
+ struct mutex mutex;
+} *srm_data;
+
+static inline void drm_hdcp_print_ksv(const u8 *ksv)
+{
+ DRM_DEBUG("\t%#02x, %#02x, %#02x, %#02x, %#02x\n",
+ ksv[0], ksv[1], ksv[2], ksv[3], ksv[4]);
+}
+
+static u32 drm_hdcp_get_revoked_ksv_count(const u8 *buf, u32 vrls_length)
+{
+ u32 parsed_bytes = 0, ksv_count = 0, vrl_ksv_cnt, vrl_sz;
+
+ while (parsed_bytes < vrls_length) {
+ vrl_ksv_cnt = *buf;
+ ksv_count += vrl_ksv_cnt;
+
+ vrl_sz = (vrl_ksv_cnt * DRM_HDCP_KSV_LEN) + 1;
+ buf += vrl_sz;
+ parsed_bytes += vrl_sz;
+ }
+
+ /*
+	 * When the VRLs are not valid, the KSVs are not considered.
+	 * Hence the SRM will be discarded.
+ */
+ if (parsed_bytes != vrls_length)
+ ksv_count = 0;
+
+ return ksv_count;
+}
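As a worked example of the arithmetic above: each VRL is one count byte
followed by that many 5-byte KSVs, so a stream holding a 2-KSV VRL and a
1-KSV VRL spans (2 * 5 + 1) + (1 * 5 + 1) = 17 bytes and yields a ksv_count
of 3; any mismatch against vrls_length invalidates the whole SRM.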
+
+static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
+ u32 vrls_length)
+{
+ u32 parsed_bytes = 0, ksv_count = 0;
+ u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0;
+
+ do {
+ vrl_ksv_cnt = *buf;
+ vrl_ksv_sz = vrl_ksv_cnt * DRM_HDCP_KSV_LEN;
+
+ buf++;
+
+ DRM_DEBUG("vrl: %d, Revoked KSVs: %d\n", vrl_idx++,
+ vrl_ksv_cnt);
+ memcpy(revoked_ksv_list, buf, vrl_ksv_sz);
+
+ ksv_count += vrl_ksv_cnt;
+ revoked_ksv_list += vrl_ksv_sz;
+ buf += vrl_ksv_sz;
+
+ parsed_bytes += (vrl_ksv_sz + 1);
+ } while (parsed_bytes < vrls_length);
+
+ return ksv_count;
+}
+
+static inline u32 get_vrl_length(const u8 *buf)
+{
+ return drm_hdcp_be24_to_cpu(buf);
+}
+
+static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
+{
+ struct hdcp_srm_header *header;
+ u32 vrl_length, ksv_count;
+
+ if (count < (sizeof(struct hdcp_srm_header) +
+ DRM_HDCP_1_4_VRL_LENGTH_SIZE + DRM_HDCP_1_4_DCP_SIG_SIZE)) {
+ DRM_ERROR("Invalid blob length\n");
+ return -EINVAL;
+ }
+
+ header = (struct hdcp_srm_header *)buf;
+ DRM_DEBUG("SRM ID: 0x%x, SRM Ver: 0x%x, SRM Gen No: 0x%x\n",
+ header->srm_id,
+ be16_to_cpu(header->srm_version), header->srm_gen_no);
+
+ WARN_ON(header->reserved);
+
+ buf = buf + sizeof(*header);
+ vrl_length = get_vrl_length(buf);
+ if (count < (sizeof(struct hdcp_srm_header) + vrl_length) ||
+ vrl_length < (DRM_HDCP_1_4_VRL_LENGTH_SIZE +
+ DRM_HDCP_1_4_DCP_SIG_SIZE)) {
+ DRM_ERROR("Invalid blob length or vrl length\n");
+ return -EINVAL;
+ }
+
+	/* Length of all the VRLs combined */
+ vrl_length -= (DRM_HDCP_1_4_VRL_LENGTH_SIZE +
+ DRM_HDCP_1_4_DCP_SIG_SIZE);
+
+ if (!vrl_length) {
+ DRM_ERROR("No vrl found\n");
+ return -EINVAL;
+ }
+
+ buf += DRM_HDCP_1_4_VRL_LENGTH_SIZE;
+ ksv_count = drm_hdcp_get_revoked_ksv_count(buf, vrl_length);
+ if (!ksv_count) {
+ DRM_DEBUG("Revoked KSV count is 0\n");
+ return count;
+ }
+
+ kfree(srm_data->revoked_ksv_list);
+ srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
+ GFP_KERNEL);
+ if (!srm_data->revoked_ksv_list) {
+ DRM_ERROR("Out of Memory\n");
+ return -ENOMEM;
+ }
+
+ if (drm_hdcp_get_revoked_ksvs(buf, srm_data->revoked_ksv_list,
+ vrl_length) != ksv_count) {
+ srm_data->revoked_ksv_cnt = 0;
+ kfree(srm_data->revoked_ksv_list);
+ return -EINVAL;
+ }
+
+ srm_data->revoked_ksv_cnt = ksv_count;
+ return count;
+}
+
+static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
+{
+ struct hdcp_srm_header *header;
+ u32 vrl_length, ksv_count, ksv_sz;
+
+ if (count < (sizeof(struct hdcp_srm_header) +
+ DRM_HDCP_2_VRL_LENGTH_SIZE + DRM_HDCP_2_DCP_SIG_SIZE)) {
+ DRM_ERROR("Invalid blob length\n");
+ return -EINVAL;
+ }
+
+ header = (struct hdcp_srm_header *)buf;
+ DRM_DEBUG("SRM ID: 0x%x, SRM Ver: 0x%x, SRM Gen No: 0x%x\n",
+ header->srm_id & DRM_HDCP_SRM_ID_MASK,
+ be16_to_cpu(header->srm_version), header->srm_gen_no);
+
+ if (header->reserved)
+ return -EINVAL;
+
+ buf = buf + sizeof(*header);
+ vrl_length = get_vrl_length(buf);
+
+ if (count < (sizeof(struct hdcp_srm_header) + vrl_length) ||
+ vrl_length < (DRM_HDCP_2_VRL_LENGTH_SIZE +
+ DRM_HDCP_2_DCP_SIG_SIZE)) {
+ DRM_ERROR("Invalid blob length or vrl length\n");
+ return -EINVAL;
+ }
+
+	/* Length of all the VRLs combined */
+ vrl_length -= (DRM_HDCP_2_VRL_LENGTH_SIZE +
+ DRM_HDCP_2_DCP_SIG_SIZE);
+
+ if (!vrl_length) {
+ DRM_ERROR("No vrl found\n");
+ return -EINVAL;
+ }
+
+ buf += DRM_HDCP_2_VRL_LENGTH_SIZE;
+ ksv_count = (*buf << 2) | DRM_HDCP_2_KSV_COUNT_2_LSBITS(*(buf + 1));
+ if (!ksv_count) {
+ DRM_DEBUG("Revoked KSV count is 0\n");
+ return count;
+ }
+
+ kfree(srm_data->revoked_ksv_list);
+ srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
+ GFP_KERNEL);
+ if (!srm_data->revoked_ksv_list) {
+ DRM_ERROR("Out of Memory\n");
+ return -ENOMEM;
+ }
+
+ ksv_sz = ksv_count * DRM_HDCP_KSV_LEN;
+ buf += DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ;
+
+ DRM_DEBUG("Revoked KSVs: %d\n", ksv_count);
+ memcpy(srm_data->revoked_ksv_list, buf, ksv_sz);
+
+ srm_data->revoked_ksv_cnt = ksv_count;
+ return count;
+}
+
+static inline bool is_srm_version_hdcp1(const u8 *buf)
+{
+ return *buf == (u8)(DRM_HDCP_1_4_SRM_ID << 4);
+}
+
+static inline bool is_srm_version_hdcp2(const u8 *buf)
+{
+ return *buf == (u8)(DRM_HDCP_2_SRM_ID << 4 | DRM_HDCP_2_INDICATOR);
+}
+
+static void drm_hdcp_srm_update(const u8 *buf, size_t count)
+{
+ if (count < sizeof(struct hdcp_srm_header))
+ return;
+
+ if (is_srm_version_hdcp1(buf))
+ drm_hdcp_parse_hdcp1_srm(buf, count);
+ else if (is_srm_version_hdcp2(buf))
+ drm_hdcp_parse_hdcp2_srm(buf, count);
+}
+
+static void drm_hdcp_request_srm(struct drm_device *drm_dev)
+{
+ char fw_name[36] = "display_hdcp_srm.bin";
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware_direct(&fw, fw_name, drm_dev->dev);
+ if (ret < 0)
+ goto exit;
+
+ if (fw->size && fw->data)
+ drm_hdcp_srm_update(fw->data, fw->size);
+
+exit:
+ release_firmware(fw);
+}
+
+/**
+ * drm_hdcp_check_ksvs_revoked - Check the revocation status of the KSVs
+ *
+ * @drm_dev: drm_device for which HDCP revocation check is requested
+ * @ksvs: List of KSVs (HDCP receiver IDs)
+ * @ksv_count: number of KSVs in @ksvs
+ *
+ * This function reads the HDCP System Renewability Message (SRM table)
+ * from userspace as firmware and parses it for the HDCP KSVs (receiver
+ * IDs) revoked by the DCP LLC. The KSVs passed in by the display driver
+ * are then checked against that revoked list, and the result is returned.
+ *
+ * The SRM firmware blob must be named "display_hdcp_srm.bin".
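+ *
+ * A minimal sketch of a caller during HDCP authentication; ksv_fifo and
+ * num_downstream are illustrative driver-side names, not part of this API:
+ *
+ * .. code-block:: c
+ *
+ *     if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
+ *             DRM_ERROR("Revoked receiver ID(s) in KSV list\n");
+ *             return -EPERM;
+ *     }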
+ *
+ * Returns:
+ * true if any of the KSVs is revoked, false otherwise.
+ */
+bool drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
+ u32 ksv_count)
+{
+ u32 rev_ksv_cnt, cnt, i;
+ u8 *rev_ksv_list;
+
+ if (!srm_data)
+ return false;
+
+ mutex_lock(&srm_data->mutex);
+ drm_hdcp_request_srm(drm_dev);
+
+ rev_ksv_cnt = srm_data->revoked_ksv_cnt;
+ rev_ksv_list = srm_data->revoked_ksv_list;
+
+ /* If the Revoked ksv list is empty */
+ if (!rev_ksv_cnt || !rev_ksv_list) {
+ mutex_unlock(&srm_data->mutex);
+ return false;
+ }
+
+ for (cnt = 0; cnt < ksv_count; cnt++) {
+ rev_ksv_list = srm_data->revoked_ksv_list;
+ for (i = 0; i < rev_ksv_cnt; i++) {
+ if (!memcmp(ksvs, rev_ksv_list, DRM_HDCP_KSV_LEN)) {
+ DRM_DEBUG("Revoked KSV is ");
+ drm_hdcp_print_ksv(ksvs);
+ mutex_unlock(&srm_data->mutex);
+ return true;
+ }
+ /* Move the offset to next KSV in the revoked list */
+ rev_ksv_list += DRM_HDCP_KSV_LEN;
+ }
+
+ /* Iterate to next ksv_offset */
+ ksvs += DRM_HDCP_KSV_LEN;
+ }
+ mutex_unlock(&srm_data->mutex);
+ return false;
+}
+EXPORT_SYMBOL_GPL(drm_hdcp_check_ksvs_revoked);
+
+int drm_setup_hdcp_srm(struct class *drm_class)
+{
+ srm_data = kzalloc(sizeof(*srm_data), GFP_KERNEL);
+ if (!srm_data)
+ return -ENOMEM;
+ mutex_init(&srm_data->mutex);
+
+ return 0;
+}
+
+void drm_teardown_hdcp_srm(struct class *drm_class)
+{
+ if (srm_data) {
+ kfree(srm_data->revoked_ksv_list);
+ kfree(srm_data);
+ }
+}
+
+static struct drm_prop_enum_list drm_cp_enum_list[] = {
+ { DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
+ { DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
+ { DRM_MODE_CONTENT_PROTECTION_ENABLED, "Enabled" },
+};
+DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
+
+/**
+ * drm_connector_attach_content_protection_property - attach content protection
+ * property
+ *
+ * @connector: connector to attach CP property on.
+ *
+ * This is used to add support for content protection on select connectors.
+ * Content Protection is intentionally vague to allow for different underlying
+ * technologies, however it is most commonly implemented by HDCP.
+ *
+ * The current content protection state is tracked in
+ * &drm_connector_state.content_protection.
+ *
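+ * A minimal usage sketch from a driver's connector init path:
+ *
+ * .. code-block:: c
+ *
+ *     ret = drm_connector_attach_content_protection_property(connector);
+ *     if (ret)
+ *             return ret;
+ *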
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_content_protection_property(
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop =
+ dev->mode_config.content_protection_property;
+
+ if (!prop)
+ prop = drm_property_create_enum(dev, 0, "Content Protection",
+ drm_cp_enum_list,
+ ARRAY_SIZE(drm_cp_enum_list));
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&connector->base, prop,
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+ dev->mode_config.content_protection_property = prop;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index e19ac7ca602d..d18c7b91a1a8 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -28,8 +28,16 @@
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-struct drm_prime_file_private;
+struct dentry;
struct dma_buf;
+struct drm_connector;
+struct drm_crtc;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_master;
+struct drm_minor;
+struct drm_prime_file_private;
+struct drm_printer;
/* drm_file.c */
extern struct mutex drm_global_mutex;
@@ -93,6 +101,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_master_open(struct drm_file *file_priv);
void drm_master_release(struct drm_file *file_priv);
+bool drm_master_internal_acquire(struct drm_device *dev);
+void drm_master_internal_release(struct drm_device *dev);
/* drm_sysfs.c */
extern struct class *drm_class;
@@ -106,6 +116,7 @@ void drm_sysfs_connector_remove(struct drm_connector *connector);
void drm_sysfs_lease_event(struct drm_device *dev);
/* drm_gem.c */
+struct drm_gem_object;
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
int drm_gem_handle_create_tail(struct drm_file *file_priv,
@@ -126,12 +137,12 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root);
-int drm_debugfs_cleanup(struct drm_minor *minor);
-int drm_debugfs_connector_add(struct drm_connector *connector);
+void drm_debugfs_cleanup(struct drm_minor *minor);
+void drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
-int drm_debugfs_crtc_add(struct drm_crtc *crtc);
+void drm_debugfs_crtc_add(struct drm_crtc *crtc);
void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
-int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
+void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
#else
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
@@ -139,30 +150,26 @@ static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
return 0;
}
-static inline int drm_debugfs_cleanup(struct drm_minor *minor)
+static inline void drm_debugfs_cleanup(struct drm_minor *minor)
{
- return 0;
}
-static inline int drm_debugfs_connector_add(struct drm_connector *connector)
+static inline void drm_debugfs_connector_add(struct drm_connector *connector)
{
- return 0;
}
static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
{
}
-static inline int drm_debugfs_crtc_add(struct drm_crtc *crtc)
+static inline void drm_debugfs_crtc_add(struct drm_crtc *crtc)
{
- return 0;
}
static inline void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
{
}
-static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
+static inline void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
- return 0;
}
#endif
@@ -201,3 +208,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
int drm_framebuffer_debugfs_init(struct drm_minor *minor);
+
+/* drm_hdcp.c */
+int drm_setup_hdcp_srm(struct class *drm_class);
+void drm_teardown_hdcp_srm(struct class *drm_class);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 374b372da58a..586aa28024c5 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -31,10 +31,13 @@
#include <linux/ratelimit.h>
#include <linux/export.h>
-#include <drm/drmP.h>
-#include "drm_legacy.h"
-#include "drm_internal.h"
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+#include "drm_legacy.h"
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 2263e3ddd822..9441a36a2469 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -28,16 +28,22 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drm_ioctl.h>
-#include <drm/drmP.h>
-#include <drm/drm_auth.h>
-#include "drm_legacy.h"
-#include "drm_internal.h"
-#include "drm_crtc_internal.h"
-
-#include <linux/pci.h>
#include <linux/export.h>
#include <linux/nospec.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_print.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+#include "drm_legacy.h"
/**
* DOC: getunique and setversion story
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 02f38cc9f468..03bce566a8c3 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -51,13 +51,18 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drm_irq.h>
-#include <drm/drmP.h>
+#include <linux/export.h>
#include <linux/interrupt.h> /* For task queue support */
-
+#include <linux/pci.h>
#include <linux/vgaarb.h>
-#include <linux/export.h>
+
+#include <drm/drm.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "drm_internal.h"
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 9c5ae825c507..d9a5ac81949e 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -26,7 +26,8 @@
*/
#include <linux/module.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_print.h>
#include "drm_crtc_helper_internal.h"
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index e8a5e3b13b2a..b481cafdde28 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -2,14 +2,19 @@
/*
* Copyright © 2017 Keith Packard <keithp@keithp.com>
*/
+#include <linux/file.h>
+#include <linux/uaccess.h>
-#include <drm/drmP.h>
-#include "drm_internal.h"
-#include "drm_legacy.h"
-#include "drm_crtc_internal.h"
-#include <drm/drm_lease.h>
#include <drm/drm_auth.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_lease.h>
+#include <drm/drm_print.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+#include "drm_legacy.h"
#define drm_for_each_lessee(lessee, lessor) \
list_for_each_entry((lessee), &(lessor)->lessees, lessee_list)
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 51f1fabfa145..1be3ea320474 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -29,11 +29,15 @@
* drivers use them, and removing them are API breaks.
*/
#include <linux/list.h>
+
+#include <drm/drm.h>
+#include <drm/drm_device.h>
#include <drm/drm_legacy.h>
struct agp_memory;
struct drm_device;
struct drm_file;
+struct drm_buf_desc;
/*
* Generic DRM Contexts
@@ -187,10 +191,12 @@ int drm_legacy_sg_free(struct drm_device *dev, void *data,
void drm_legacy_init_members(struct drm_device *dev);
void drm_legacy_destroy_members(struct drm_device *dev);
void drm_legacy_dev_reinit(struct drm_device *dev);
+int drm_legacy_setup(struct drm_device *dev);
#else
static inline void drm_legacy_init_members(struct drm_device *dev) {}
static inline void drm_legacy_destroy_members(struct drm_device *dev) {}
static inline void drm_legacy_dev_reinit(struct drm_device *dev) {}
+static inline int drm_legacy_setup(struct drm_device *dev) { return 0; }
#endif
#if IS_ENABLED(CONFIG_DRM_LEGACY)
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
index 2fe786839ca8..4d3a11cfd979 100644
--- a/drivers/gpu/drm/drm_legacy_misc.c
+++ b/drivers/gpu/drm/drm_legacy_misc.c
@@ -33,7 +33,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_print.h>
+
#include "drm_internal.h"
#include "drm_legacy.h"
@@ -51,6 +56,26 @@ void drm_legacy_destroy_members(struct drm_device *dev)
mutex_destroy(&dev->ctxlist_mutex);
}
+int drm_legacy_setup(struct drm_device *dev)
+{
+ int ret;
+
+ if (dev->driver->firstopen &&
+ drm_core_check_feature(dev, DRIVER_LEGACY)) {
+ ret = dev->driver->firstopen(dev);
+ if (ret != 0)
+ return ret;
+ }
+
+ ret = drm_legacy_dma_setup(dev);
+ if (ret < 0)
+ return ret;
+
+ DRM_DEBUG("\n");
+ return 0;
+}
+
void drm_legacy_dev_reinit(struct drm_device *dev)
{
if (dev->irq_enabled)
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index b70058e77a28..68b18b0e290c 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -36,9 +36,13 @@
#include <linux/export.h>
#include <linux/sched/signal.h>
-#include <drm/drmP.h>
-#include "drm_legacy.h"
+#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "drm_internal.h"
+#include "drm_legacy.h"
static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 132fef8ff1b6..b634e1670190 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -33,10 +33,15 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/highmem.h>
#include <linux/export.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
#include <xen/xen.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+
#include "drm_legacy.h"
#if IS_ENABLED(CONFIG_AGP)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 8b4cd31ce7bd..9a59865ce574 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -42,12 +42,13 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_mm.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+
+#include <drm/drm_mm.h>
/**
* DOC: Overview
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 1a346ae1599d..7bc03c3c154f 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -20,9 +20,13 @@
* OF THIS SOFTWARE.
*/
+#include <linux/uaccess.h>
+
+#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_file.h>
#include <drm/drm_mode_config.h>
-#include <drm/drmP.h>
+#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index f32507e65b79..1c6e51135962 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -21,9 +21,14 @@
*/
#include <linux/export.h>
-#include <drm/drmP.h>
-#include <drm/drm_mode_object.h>
+#include <linux/uaccess.h>
+
#include <drm/drm_atomic.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 56f92a0bba62..5a07a28fec6d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -33,11 +33,14 @@
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/export.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
+
#include <video/of_videomode.h>
#include <video/videomode.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_modes.h>
+#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 81dd11901ffd..53187821df01 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -21,9 +21,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>
/**
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 6becf63f9166..43d89dd59c6b 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -3,12 +3,13 @@
#include <linux/export.h>
#include <linux/list.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
+
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_panel.h>
#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
/**
* DOC: overview
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 693748ad8b88..a86a3ab2771c 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -22,12 +22,17 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/pci.h>
-#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <drm/drm.h>
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_drv.h>
#include <drm/drm_pci.h>
-#include <drm/drmP.h>
+#include <drm/drm_print.h>
+
#include "drm_internal.h"
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 0fff72dcd06d..3aae7ea522f2 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -24,14 +24,15 @@
*/
#include <linux/list.h>
-#include <drm/drmP.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_rect.h>
+
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
#define SUBPIXEL_MASK 0xffff
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index dc079efb3b0f..d0c01318076b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -29,9 +29,12 @@
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
-#include <drm/drm_prime.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
-#include <drm/drmP.h>
+#include <drm/drm_prime.h>
#include "drm_internal.h"
@@ -86,11 +89,6 @@ struct drm_prime_member {
struct rb_node handle_rb;
};
-struct drm_prime_attachment {
- struct sg_table *sgt;
- enum dma_data_direction dir;
-};
-
static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf, uint32_t handle)
{
@@ -188,25 +186,16 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
* @dma_buf: buffer to attach device to
* @attach: buffer attachment data
*
- * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
- * device specific attachment. This can be used as the &dma_buf_ops.attach
- * callback.
+ * Calls &drm_driver.gem_prime_pin for device specific handling. This can be
+ * used as the &dma_buf_ops.attach callback.
*
* Returns 0 on success, negative error code on failure.
*/
int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
- struct drm_prime_attachment *prime_attach;
struct drm_gem_object *obj = dma_buf->priv;
- prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
- if (!prime_attach)
- return -ENOMEM;
-
- prime_attach->dir = DMA_NONE;
- attach->priv = prime_attach;
-
return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);
@@ -222,26 +211,8 @@ EXPORT_SYMBOL(drm_gem_map_attach);
void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
- struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = dma_buf->priv;
- if (prime_attach) {
- struct sg_table *sgt = prime_attach->sgt;
-
- if (sgt) {
- if (prime_attach->dir != DMA_NONE)
- dma_unmap_sg_attrs(attach->dev, sgt->sgl,
- sgt->nents,
- prime_attach->dir,
- DMA_ATTR_SKIP_CPU_SYNC);
- sg_free_table(sgt);
- }
-
- kfree(sgt);
- kfree(prime_attach);
- attach->priv = NULL;
- }
-
drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);
@@ -286,39 +257,22 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
- struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = attach->dmabuf->priv;
struct sg_table *sgt;
- if (WARN_ON(dir == DMA_NONE || !prime_attach))
+ if (WARN_ON(dir == DMA_NONE))
return ERR_PTR(-EINVAL);
- /* return the cached mapping when possible */
- if (prime_attach->dir == dir)
- return prime_attach->sgt;
-
- /*
- * two mappings with different directions for the same attachment are
- * not allowed
- */
- if (WARN_ON(prime_attach->dir != DMA_NONE))
- return ERR_PTR(-EBUSY);
-
if (obj->funcs)
sgt = obj->funcs->get_sg_table(obj);
else
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
- if (!IS_ERR(sgt)) {
- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC)) {
- sg_free_table(sgt);
- kfree(sgt);
- sgt = ERR_PTR(-ENOMEM);
- } else {
- prime_attach->sgt = sgt;
- prime_attach->dir = dir;
- }
+ if (IS_ERR(sgt))
+ return sgt;
+
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = ERR_PTR(-ENOMEM);
}
return sgt;
@@ -331,14 +285,19 @@ EXPORT_SYMBOL(drm_gem_map_dma_buf);
* @sgt: scatterlist info of the buffer to unmap
* @dir: direction of DMA transfer
*
- * Not implemented. The unmap is done at drm_gem_map_detach(). This can be
- * used as the &dma_buf_ops.unmap_dma_buf callback.
+ * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
*/
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- /* nothing to be done here */
+ if (!sgt)
+ return;
+
+ dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
@@ -452,6 +411,7 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+ .cache_sgt_mapping = true,
.attach = drm_gem_map_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index f5cb0aabfe35..a17c8a14dba4 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -26,8 +26,13 @@
#define DEBUG /* for pr_debug() */
#include <stdarg.h>
+
+#include <linux/io.h>
#include <linux/seq_file.h>
-#include <drm/drmP.h>
+#include <linux/slab.h>
+
+#include <drm/drm.h>
+#include <drm/drm_drv.h>
#include <drm/drm_print.h>
void __drm_puts_coredump(struct drm_printer *p, const char *str)
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index dd427c7ff967..ef2c468205a2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -32,14 +32,15 @@
#include <linux/export.h>
#include <linux/moduleparam.h>
-#include <drm/drmP.h>
#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_sysfs.h>
#include "drm_crtc_helper_internal.h"
@@ -581,6 +582,9 @@ static void output_poll_execute(struct work_struct *work)
enum drm_connector_status old_status;
bool repoll = false, changed;
+ if (!dev->mode_config.poll_enabled)
+ return;
+
/* Pick up any changes detected by the probe functions. */
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
@@ -735,7 +739,11 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
*/
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
- drm_kms_helper_poll_disable(dev);
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ dev->mode_config.poll_enabled = false;
+ cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index f8ec8f9c3e7a..892ce636ef72 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -21,7 +21,12 @@
*/
#include <linux/export.h>
-#include <drm/drmP.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_property.h>
#include "drm_crtc_internal.h"
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 66c41b12719c..b8363aaa9032 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -24,7 +24,9 @@
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/kernel.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
/**
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index bb829a115fc6..2d7790f14b0c 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -31,9 +31,14 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+
#include "drm_legacy.h"
#define DEBUG_SCATTER 0
diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c
index 870e25f1f788..311e71bbba5b 100644
--- a/drivers/gpu/drm/drm_scdc_helper.c
+++ b/drivers/gpu/drm/drm_scdc_helper.c
@@ -24,8 +24,8 @@
#include <linux/slab.h>
#include <linux/delay.h>
+#include <drm/drm_print.h>
#include <drm/drm_scdc_helper.h>
-#include <drm/drmP.h>
/**
* DOC: scdc helpers
diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c
new file mode 100644
index 000000000000..2b3daaf77841
--- /dev/null
+++ b/drivers/gpu/drm/drm_self_refresh_helper.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2019 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_modeset_lock.h>
+#include <drm/drm_print.h>
+#include <drm/drm_self_refresh_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * This helper library provides an easy way for drivers to leverage the atomic
+ * framework to implement panel self refresh (SR) support. Drivers are
+ * responsible for initializing and cleaning up the SR helpers on load/unload
+ * (see drm_self_refresh_helper_init() and drm_self_refresh_helper_cleanup()).
+ * The connector is responsible for setting
+ * &drm_connector_state.self_refresh_aware to true at runtime if it is SR-aware
+ * (meaning it knows how to initiate self refresh on the panel).
+ *
+ * Once a crtc has enabled SR using drm_self_refresh_helper_init(), the
+ * helpers will monitor activity and call back into the driver to enable/disable
+ * SR as appropriate. The best way to think about this is that it's a DPMS
+ * on/off request with &drm_crtc_state.self_refresh_active set in crtc state
+ * that tells you to disable/enable SR on the panel instead of power-cycling it.
+ *
+ * During SR, drivers may choose to fully disable their crtc/encoder/bridge
+ * hardware (in which case no driver changes are necessary), or they can inspect
+ * &drm_crtc_state.self_refresh_active if they want to enter low power mode
+ * without full disable (in case full disable/enable is too slow).
+ *
+ * SR will be deactivated if there are any atomic updates affecting the
+ * pipe that is in SR mode. If a crtc is driving multiple connectors, all
+ * connectors must be SR aware and all will enter/exit SR mode at the same time.
+ *
+ * If the crtc and connector are SR aware, but the panel connected does not
+ * support it (or is otherwise unable to enter SR), the driver should fail
+ * atomic_check when &drm_crtc_state.self_refresh_active is true.
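+ *
+ * A minimal sketch of enabling the helpers on a crtc; the one-second entry
+ * delay is an arbitrary example value:
+ *
+ * .. code-block:: c
+ *
+ *     ret = drm_self_refresh_helper_init(crtc, 1000);
+ *     if (ret)
+ *             return ret;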
+ */
+
+struct drm_self_refresh_data {
+ struct drm_crtc *crtc;
+ struct delayed_work entry_work;
+ struct drm_atomic_state *save_state;
+ unsigned int entry_delay_ms;
+};
+
+static void drm_self_refresh_helper_entry_work(struct work_struct *work)
+{
+ struct drm_self_refresh_data *sr_data = container_of(
+ to_delayed_work(work),
+ struct drm_self_refresh_data, entry_work);
+ struct drm_crtc *crtc = sr_data->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ int i, ret = 0;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_drop_locks;
+ }
+
+retry:
+ state->acquire_ctx = &ctx;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ if (!crtc_state->enable)
+ goto out;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ goto out;
+
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
+ if (!conn_state->self_refresh_aware)
+ goto out;
+ }
+
+ crtc_state->active = false;
+ crtc_state->self_refresh_active = true;
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ goto out;
+
+out:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
+
+ drm_atomic_state_put(state);
+
+out_drop_locks:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/**
+ * drm_self_refresh_helper_alter_state - Alters the atomic state for SR exit
+ * @state: the state currently being checked
+ *
+ * Called at the end of atomic check. This function checks the state for flags
+ * incompatible with self refresh exit and changes them. This is a bit
+ * disingenuous since userspace is expecting one thing and we're giving it
+ * another. However in order to keep self refresh entirely hidden from
+ * userspace, this is required.
+ *
+ * At the end, we queue up the self refresh entry work so we can enter self
+ * refresh after the desired delay.
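+ *
+ * A sketch of calling this at the end of a fully custom atomic_check,
+ * under the assumption that the driver does not reuse a helper that
+ * already performs this call:
+ *
+ * .. code-block:: c
+ *
+ *     ret = drm_atomic_helper_check_planes(dev, state);
+ *     if (ret)
+ *             return ret;
+ *     drm_self_refresh_helper_alter_state(state);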
+ */
+void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ if (state->async_update || !state->allow_modeset) {
+ for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->self_refresh_active) {
+ state->async_update = false;
+ state->allow_modeset = true;
+ break;
+ }
+ }
+ }
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct drm_self_refresh_data *sr_data;
+
+ /* Don't trigger the entry timer when we're already in SR */
+ if (crtc_state->self_refresh_active)
+ continue;
+
+ sr_data = crtc->self_refresh_data;
+ if (!sr_data)
+ continue;
+
+ mod_delayed_work(system_wq, &sr_data->entry_work,
+ msecs_to_jiffies(sr_data->entry_delay_ms));
+ }
+}
+EXPORT_SYMBOL(drm_self_refresh_helper_alter_state);
+
+/**
+ * drm_self_refresh_helper_init - Initializes self refresh helpers for a crtc
+ * @crtc: the crtc driving a self-refresh-capable display
+ * @entry_delay_ms: amount of inactivity to wait before entering self refresh
+ *
+ * Returns zero if successful or -errno on failure
+ */
+int drm_self_refresh_helper_init(struct drm_crtc *crtc,
+ unsigned int entry_delay_ms)
+{
+ struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
+
+ /* Helper is already initialized */
+ if (WARN_ON(sr_data))
+ return -EINVAL;
+
+ sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL);
+ if (!sr_data)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&sr_data->entry_work,
+ drm_self_refresh_helper_entry_work);
+ sr_data->entry_delay_ms = entry_delay_ms;
+ sr_data->crtc = crtc;
+
+ crtc->self_refresh_data = sr_data;
+ return 0;
+}
+EXPORT_SYMBOL(drm_self_refresh_helper_init);
+
+/**
+ * drm_self_refresh_helper_cleanup - Cleans up self refresh helpers for a crtc
+ * @crtc: the crtc to cleanup
+ */
+void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc)
+{
+ struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
+
+ /* Helper is already uninitialized */
+ if (!sr_data)
+ return;
+
+ crtc->self_refresh_data = NULL;
+
+ cancel_delayed_work_sync(&sr_data->entry_work);
+ kfree(sr_data);
+}
+EXPORT_SYMBOL(drm_self_refresh_helper_cleanup);
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 5d5e9091ad6d..b11910f14c46 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -3,13 +3,14 @@
* Copyright (C) 2016 Noralf Trønnes
*/
-#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <linux/slab.h>
/**
* DOC: overview
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 3d400905100b..a199c8d56b95 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -46,16 +46,21 @@
* The file takes a reference on the kref.
*/
-#include <drm/drmP.h>
+#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/anon_inodes.h>
-#include <linux/sync_file.h>
#include <linux/sched/signal.h>
+#include <linux/sync_file.h>
+#include <linux/uaccess.h>
-#include "drm_internal.h"
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
+#include "drm_internal.h"
+
struct syncobj_wait_entry {
struct list_head node;
struct task_struct *task;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 74857fafb0b8..ad10810bc972 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -11,13 +11,20 @@
*/
#include <linux/device.h>
-#include <linux/kdev_t.h>
-#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/export.h>
-
+#include <linux/gfp.h>
+#include <linux/kdev_t.h>
+#include <linux/slab.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_print.h>
+#include <drm/drm_property.h>
#include <drm/drm_sysfs.h>
-#include <drm/drmP.h>
+
#include "drm_internal.h"
#define to_drm_minor(d) dev_get_drvdata(d)
@@ -76,6 +83,7 @@ int drm_sysfs_init(void)
}
drm_class->devnode = drm_devnode;
+ drm_setup_hdcp_srm(drm_class);
return 0;
}
@@ -88,6 +96,7 @@ void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
+ drm_teardown_hdcp_srm(drm_class);
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index baccc63db106..471eb927474b 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -6,6 +6,8 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+struct drm_file;
+
#undef TRACE_SYSTEM
#define TRACE_SYSTEM drm
#define TRACE_INCLUDE_FILE drm_trace
diff --git a/drivers/gpu/drm/drm_trace_points.c b/drivers/gpu/drm/drm_trace_points.c
index 3bbc4deb4dbc..1e2065b403c9 100644
--- a/drivers/gpu/drm/drm_trace_points.c
+++ b/drivers/gpu/drm/drm_trace_points.c
@@ -1,4 +1,5 @@
-#include <drm/drmP.h>
+
+#include <drm/drm_file.h>
#define CREATE_TRACE_POINTS
#include "drm_trace.h"
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index a1b65d26d761..0d704bddb1a6 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -24,12 +24,18 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drm_vblank.h>
-#include <drm/drmP.h>
#include <linux/export.h>
+#include <linux/moduleparam.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
+#include <drm/drm_os_linux.h>
+#include <drm/drm_vblank.h>
-#include "drm_trace.h"
#include "drm_internal.h"
+#include "drm_trace.h"
/**
* DOC: vblank handling
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 10cf83d569e1..2f24ee6c7a92 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -33,15 +33,27 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <linux/export.h>
+#include <linux/pci.h>
#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>
+
#include <asm/pgtable.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_print.h>
+
#include "drm_internal.h"
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index c5d0d2358301..4565319fa6b3 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -23,9 +23,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
-#include <drm/drm_mm.h>
-#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
@@ -33,6 +30,9 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_vma_manager.h>
+
/**
* DOC: vma offset manager
*
diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c
new file mode 100644
index 000000000000..e9c9f9a80ba3
--- /dev/null
+++ b/drivers/gpu/drm/drm_vram_helper_common.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/module.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
+ * buffer object that is backed by video RAM. It can be used for
+ * framebuffer devices with dedicated memory. The video RAM can be
+ * managed with &struct drm_vram_mm (VRAM MM). Both data structures are
+ * supposed to be used together, but can also be used individually.
+ *
+ * With the GEM interface, userspace applications create, manage and destroy
+ * graphics buffers, such as an on-screen framebuffer. GEM does not provide
+ * an implementation of these interfaces. It's up to the DRM driver to
+ * provide an implementation that suits the hardware. If the hardware device
+ * contains dedicated video memory, the DRM driver can use the VRAM helper
+ * library. Each active buffer object is stored in video RAM. Active
+ * buffers are used for drawing the current frame, typically something like
+ * the frame's scanout buffer or the cursor image. If there's no more space
+ * left in VRAM, inactive GEM objects can be moved to system memory.
+ *
+ * The easiest way to use the VRAM helper library is to call
+ * drm_vram_helper_alloc_mm(). The function allocates and initializes an
+ * instance of &struct drm_vram_mm in &struct drm_device.vram_mm. Use
+ * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
+ * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
+ * as illustrated below.
+ *
+ * .. code-block:: c
+ *
+ * struct file_operations fops = {
+ * .owner = THIS_MODULE,
+ * DRM_VRAM_MM_FILE_OPERATIONS
+ * };
+ * struct drm_driver drv = {
+ * .driver_features = DRM_ ... ,
+ * .fops = &fops,
+ * DRM_GEM_VRAM_DRIVER
+ * };
+ *
+ * int init_drm_driver()
+ * {
+ * struct drm_device *dev;
+ * uint64_t vram_base;
+ * unsigned long vram_size;
+ * int ret;
+ *
+ * // setup device, vram base and size
+ * // ...
+ *
+ * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size,
+ * &drm_gem_vram_mm_funcs);
+ * if (ret)
+ * return ret;
+ * return 0;
+ * }
+ *
+ * This creates an instance of &struct drm_vram_mm, exports DRM userspace
+ * interfaces for GEM buffer management and initializes file operations to
+ * allow for accessing created GEM buffers. With this setup, the DRM driver
+ * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
+ * to userspace.
+ *
+ * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
+ * in the driver's clean-up code.
+ *
+ * .. code-block:: c
+ *
+ * void fini_drm_driver()
+ * {
+ * struct drm_device *dev = ...;
+ *
+ * drm_vram_helper_release_mm(dev);
+ * }
+ *
+ * For drawing or scanout operations, buffer objects have to be pinned in video
+ * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
+ * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
+ * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
+ *
+ * A buffer object that is pinned in video RAM has a fixed address within that
+ * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
+ * it's used to program the hardware's scanout engine for framebuffers, to set
+ * the cursor overlay's image for a mouse cursor, or as input to the
+ * hardware's drawing engine.
+ *
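+ * A short sketch of pinning a buffer object for scanout; gbo is assumed to
+ * be an already-created &struct drm_gem_vram_object:
+ *
+ * .. code-block:: c
+ *
+ *     s64 gpu_addr;
+ *
+ *     ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
+ *     if (ret)
+ *             return ret;
+ *     gpu_addr = drm_gem_vram_offset(gbo);
+ *     if (gpu_addr < 0) {
+ *             drm_gem_vram_unpin(gbo);
+ *             return (int)gpu_addr;
+ *     }
+ *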
+ * To access a buffer object's memory from the DRM driver, call
+ * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
+ * space and returns the memory address. Use drm_gem_vram_kunmap() to
+ * release the mapping.
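+ *
+ * A short sketch, again assuming an existing buffer object gbo:
+ *
+ * .. code-block:: c
+ *
+ *     void *vaddr = drm_gem_vram_kmap(gbo, true, NULL);
+ *
+ *     if (IS_ERR(vaddr))
+ *             return PTR_ERR(vaddr);
+ *     // draw into the mapping, then release it
+ *     drm_gem_vram_kunmap(gbo);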
+ */
+
+MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_vram_mm_helper.c b/drivers/gpu/drm/drm_vram_mm_helper.c
new file mode 100644
index 000000000000..c911781d6728
--- /dev/null
+++ b/drivers/gpu/drm/drm_vram_mm_helper.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_vram_mm_helper.h>
+
+#include <drm/ttm/ttm_page_alloc.h>
+
+/**
+ * DOC: overview
+ *
+ * The data structure &struct drm_vram_mm and its helpers implement a memory
+ * manager for simple framebuffer devices with dedicated video memory. Buffer
+ * objects are either placed in video RAM or evicted to system memory. These
+ * helper functions work well with &struct drm_gem_vram_object.
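+ *
+ * A minimal init sketch; the embedding structure priv and the funcs table
+ * are driver-side placeholders:
+ *
+ * .. code-block:: c
+ *
+ *     ret = drm_vram_mm_init(&priv->vmm, dev, vram_base, vram_size,
+ *                            &funcs);
+ *     if (ret)
+ *             return ret;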
+ */
+
+/*
+ * TTM TT
+ */
+
+static void backend_func_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func backend_func = {
+ .destroy = backend_func_destroy
+};
+
+/*
+ * TTM BO device
+ */
+
+static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ struct ttm_tt *tt;
+ int ret;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ tt->func = &backend_func;
+
+ ret = ttm_tt_init(tt, bo, page_flags);
+ if (ret < 0)
+ goto err_ttm_tt_init;
+
+ return tt;
+
+err_ttm_tt_init:
+ kfree(tt);
+ return NULL;
+}
+
+static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
+
+ if (vmm->funcs && vmm->funcs->evict_flags)
+ vmm->funcs->evict_flags(bo, placement);
+}
+
+static int bo_driver_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
+
+ if (!vmm->funcs || !vmm->funcs->verify_access)
+ return 0;
+ return vmm->funcs->verify_access(bo, filp);
+}
+
+static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
+ struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+
+ mem->bus.addr = NULL;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM: /* nothing to do */
+ mem->bus.offset = 0;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = vmm->vram_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{ }
+
+static struct ttm_bo_driver bo_driver = {
+ .ttm_tt_create = bo_driver_ttm_tt_create,
+ .ttm_tt_populate = ttm_pool_populate,
+ .ttm_tt_unpopulate = ttm_pool_unpopulate,
+ .init_mem_type = bo_driver_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = bo_driver_evict_flags,
+ .verify_access = bo_driver_verify_access,
+ .io_mem_reserve = bo_driver_io_mem_reserve,
+ .io_mem_free = bo_driver_io_mem_free,
+};
+
+/*
+ * struct drm_vram_mm
+ */
+
+/**
+ * drm_vram_mm_init() - Initialize an instance of VRAM MM.
+ * @vmm: the VRAM MM instance to initialize
+ * @dev: the DRM device
+ * @vram_base: the base address of the video memory
+ * @vram_size: the size of the video memory in bytes
+ * @funcs: callback functions for buffer objects
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
+ uint64_t vram_base, size_t vram_size,
+ const struct drm_vram_mm_funcs *funcs)
+{
+ int ret;
+
+ vmm->vram_base = vram_base;
+ vmm->vram_size = vram_size;
+ vmm->funcs = funcs;
+
+ ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+ dev->anon_inode->i_mapping,
+ true);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_vram_mm_init);
+
+/**
+ * drm_vram_mm_cleanup() - Cleans up an initialized instance of VRAM MM.
+ * @vmm: the VRAM MM instance to clean up
+ */
+void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
+{
+ ttm_bo_device_release(&vmm->bdev);
+}
+EXPORT_SYMBOL(drm_vram_mm_cleanup);
+
+/**
+ * drm_vram_mm_mmap() - Helper for implementing &struct file_operations.mmap()
+ * @filp: the mapping's file structure
+ * @vma: the mapping's memory area
+ * @vmm: the VRAM MM instance
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
+ struct drm_vram_mm *vmm)
+{
+ return ttm_bo_mmap(filp, vma, &vmm->bdev);
+}
+EXPORT_SYMBOL(drm_vram_mm_mmap);
+
+/*
+ * Helpers for integration with struct drm_device
+ */
+
+/**
+ * drm_vram_helper_alloc_mm - Allocates a device's instance of \
+ &struct drm_vram_mm
+ * @dev: the DRM device
+ * @vram_base: the base address of the video memory
+ * @vram_size: the size of the video memory in bytes
+ * @funcs: callback functions for buffer objects
+ *
+ * Returns:
+ * The new instance of &struct drm_vram_mm on success, or
+ * an ERR_PTR()-encoded errno code otherwise.
+ */
+struct drm_vram_mm *drm_vram_helper_alloc_mm(
+ struct drm_device *dev, uint64_t vram_base, size_t vram_size,
+ const struct drm_vram_mm_funcs *funcs)
+{
+ int ret;
+
+ if (WARN_ON(dev->vram_mm))
+ return dev->vram_mm;
+
+ dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
+ if (!dev->vram_mm)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size, funcs);
+ if (ret)
+ goto err_kfree;
+
+ return dev->vram_mm;
+
+err_kfree:
+ kfree(dev->vram_mm);
+ dev->vram_mm = NULL;
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
+
+/**
+ * drm_vram_helper_release_mm - Releases a device's instance of \
+ &struct drm_vram_mm
+ * @dev: the DRM device
+ */
+void drm_vram_helper_release_mm(struct drm_device *dev)
+{
+ if (!dev->vram_mm)
+ return;
+
+ drm_vram_mm_cleanup(dev->vram_mm);
+ kfree(dev->vram_mm);
+ dev->vram_mm = NULL;
+}
+EXPORT_SYMBOL(drm_vram_helper_release_mm);
+
+/*
+ * Helpers for &struct file_operations
+ */
+
+/**
+ * drm_vram_mm_file_operations_mmap() - \
+ Implements &struct file_operations.mmap()
+ * @filp: the mapping's file structure
+ * @vma: the mapping's memory area
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_vram_mm_file_operations_mmap(
+ struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+
+ if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
+ return -EINVAL;
+
+ return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
+}
+EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index 79ac014701c8..ff138b6ec48b 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -9,12 +9,14 @@
* of such GNU licence.
*/
+#include <linux/dma-fence.h>
+
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_property.h>
#include <drm/drm_writeback.h>
-#include <drm/drmP.h>
-#include <linux/dma-fence.h>
/**
* DOC: overview
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 515515ef24f9..9a6f5b65488f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -118,7 +118,6 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
- unsigned long flags;
/* Only catch the first event, or when manually re-armed */
if (!etnaviv_dump_core)
@@ -137,13 +136,11 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
file_size += submit->cmdbuf.size;
n_obj++;
}
- spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Add in the active buffer objects */
list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -186,14 +183,12 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
- spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf));
}
- spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 6d24fea1766b..a813c824e154 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -109,7 +109,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
}
/* block scheduler */
- drm_sched_stop(&gpu->sched);
+ drm_sched_stop(&gpu->sched, sched_job);
if(sched_job)
drm_sched_increase_karma(sched_job);
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index a07eacb3d5e2..45ad5ffedc93 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -8,24 +8,24 @@
*
**************************************************************************/
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/tty.h>
+#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/tty.h>
-#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_reg.h"
-#include "framebuffer.h"
/**
* psb_spank - reset the 2D engine
diff --git a/drivers/gpu/drm/gma500/blitter.h b/drivers/gpu/drm/gma500/blitter.h
index 0d1c4cc01fd2..8d67dabd9ba3 100644
--- a/drivers/gpu/drm/gma500/blitter.h
+++ b/drivers/gpu/drm/gma500/blitter.h
@@ -9,6 +9,8 @@
#ifndef __BLITTER_H
#define __BLITTER_H
+struct drm_psb_private;
+
extern int gma_blt_wait_idle(struct drm_psb_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 3df010438bac..4d216a0205f2 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -6,15 +6,16 @@
**************************************************************************/
#include <linux/backlight.h>
-#include <drm/drmP.h>
+#include <linux/delay.h>
+
#include <drm/drm.h>
-#include <drm/gma_drm.h>
-#include "psb_drv.h"
-#include "psb_reg.h"
-#include "psb_intel_reg.h"
-#include "intel_bios.h"
+
#include "cdv_device.h"
#include "gma_device.h"
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_reg.h"
#define VGA_SR_INDEX 0x3c4
#define VGA_SR_DATA 0x3c5
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
index b375bc206363..37e4bdc84c03 100644
--- a/drivers/gpu/drm/gma500/cdv_device.h
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -3,6 +3,10 @@
* Copyright © 2011 Intel Corporation
*/
+struct drm_crtc;
+struct drm_device;
+struct psb_intel_mode_device;
+
extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
extern const struct gma_clock_funcs cdv_clock_funcs;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index cb5a14b7ec7f..29c36d63b20e 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -24,16 +24,16 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/delay.h>
#include <linux/i2c.h>
-#include <drm/drmP.h>
+#include <linux/pm_runtime.h>
+#include "cdv_device.h"
#include "intel_bios.h"
+#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "power.h"
-#include "cdv_device.h"
-#include <linux/pm_runtime.h>
static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 235cfeeec100..f56852a503e8 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -6,16 +6,18 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/delay.h>
#include <linux/i2c.h>
-#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+#include "cdv_device.h"
#include "framebuffer.h"
+#include "gma_display.h"
+#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "gma_display.h"
-#include "power.h"
-#include "cdv_device.h"
static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
struct drm_crtc *crtc, int target,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 90ed20083009..570b59520fd1 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -26,16 +26,17 @@
*/
#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/slab.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+
+#include "gma_display.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "gma_display.h"
-#include <drm/drm_dp_helper.h>
/**
* struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 4e4e4a66eaee..1711a41acc16 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -27,15 +27,16 @@
* We should probably make this generic and share it with Medfield
*/
-#include <drm/drmP.h>
+#include <linux/pm_runtime.h>
+
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "psb_intel_drv.h"
+
+#include "cdv_device.h"
#include "psb_drv.h"
+#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "cdv_device.h"
-#include <linux/pm_runtime.h>
/* hdmi control bits */
#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 4b103b3eb5ad..ea0a5d9a0acc 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -8,17 +8,16 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
-#include <linux/i2c.h>
#include <linux/dmi.h>
-#include <drm/drmP.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include "cdv_device.h"
#include "intel_bios.h"
+#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "power.h"
-#include <linux/pm_runtime.h>
-#include "cdv_device.h"
/**
* LVDS I2C backlight control macros
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 45c3db50ee1a..218f3bb15276 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -5,29 +5,29 @@
*
**************************************************************************/
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/pfn_t.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/pfn_t.h>
#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/tty.h>
-#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include "psb_drv.h"
-#include "psb_intel_reg.h"
-#include "psb_intel_drv.h"
#include "framebuffer.h"
#include "gtt.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
static const struct drm_framebuffer_funcs psb_fb_funcs = {
.destroy = drm_gem_fb_destroy,
@@ -220,7 +220,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
* Reject unknown formats, YUV formats, and formats with more than
* 4 bytes per pixel.
*/
- info = drm_format_info(mode_cmd->pixel_format);
+ info = drm_get_format_info(dev, mode_cmd);
if (!info || !info->depth || info->cpp[0] > 4)
return -EINVAL;
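The one-line change above swaps drm_format_info(), which looks up a bare fourcc, for drm_get_format_info(), which receives the whole mode_cmd and so can take modifiers into account. A hedged sketch of the resulting check, reusing gma500's own thresholds (the wrapper name is illustrative; drm_get_format_info() comes from <drm/drm_fourcc.h>):

	static int example_check_format(struct drm_device *dev,
					const struct drm_mode_fb_cmd2 *cmd)
	{
		const struct drm_format_info *info;

		/* Modifier-aware lookup; returns NULL for unknown formats. */
		info = drm_get_format_info(dev, cmd);

		/* depth == 0 marks YUV; cpp[0] > 4 means over 32bpp. */
		if (!info || !info->depth || info->cpp[0] > 4)
			return -EINVAL;

		return 0;
	}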
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index a8a0b117b661..ae8a02639fd9 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -9,7 +9,6 @@
#ifndef _FRAMEBUFFER_H_
#define _FRAMEBUFFER_H_
-#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 6cb0ad52de5e..83ee86f70b89 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -11,10 +11,11 @@
* accelerated operations on a GEM object)
*/
-#include <drm/drmP.h>
+#include <linux/pagemap.h>
+
#include <drm/drm.h>
-#include <drm/gma_drm.h>
#include <drm/drm_vma_manager.h>
+
#include "psb_drv.h"
void psb_gem_free_object(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 938408221142..869f30392566 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -5,7 +5,6 @@
*
**************************************************************************/
-#include <drm/drmP.h>
#include "psb_drv.h"
void gma_get_core_freq(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/gma_device.h b/drivers/gpu/drm/gma500/gma_device.h
index 39eb4de39048..a8cf31d4b814 100644
--- a/drivers/gpu/drm/gma500/gma_device.h
+++ b/drivers/gpu/drm/gma500/gma_device.h
@@ -7,6 +7,7 @@
#ifndef _GMA_DEVICE_H
#define _GMA_DEVICE_H
+struct drm_device;
extern void gma_get_core_freq(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index f504754020fb..e20ccb5d10fd 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -7,12 +7,18 @@
* Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
*/
-#include <drm/drmP.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+
+#include "framebuffer.h"
#include "gma_display.h"
+#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_drv.h"
-#include "framebuffer.h"
/**
* Returns whether any output on the specified pipe is of the specified type
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index 7f5bc65045a4..fdbd7ecaa59c 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -12,6 +12,9 @@
#include <linux/pm_runtime.h>
+struct drm_encoder;
+struct drm_mode_set;
+
struct gma_clock_t {
/* given values */
int n;
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index f851b922413f..afaf4bea21cf 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -7,11 +7,12 @@
* Alan Cox <alan@linux.intel.com>
*/
-#include <drm/drmP.h>
#include <linux/shmem_fs.h>
+
#include <asm/set_memory.h>
-#include "psb_drv.h"
+
#include "blitter.h"
+#include "psb_drv.h"
/*
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index eb08b4b04731..3cf190295ad3 100644
--- a/drivers/gpu/drm/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -8,7 +8,6 @@
#ifndef _PSB_GTT_H_
#define _PSB_GTT_H_
-#include <drm/drmP.h>
#include <drm/drm_gem.h>
/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 6ed7cfda0a4f..8ad6337eeba3 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -5,13 +5,13 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
-#include <drm/drmP.h>
#include <drm/drm.h>
-#include <drm/gma_drm.h>
+#include <drm/drm_dp_helper.h>
+
+#include "intel_bios.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "intel_bios.h"
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 4927b68ec561..a1f9ce9465a5 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -9,8 +9,7 @@
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_
-#include <drm/drmP.h>
-#include <drm/drm_dp_helper.h>
+struct drm_device;
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index e7e22187c539..a083fbfe35b8 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -26,13 +26,14 @@
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
-#include <linux/module.h>
-#include <linux/i2c.h>
+
+#include <linux/delay.h>
#include <linux/i2c-algo-bit.h>
-#include <drm/drmP.h>
-#include "psb_intel_drv.h"
-#include <drm/gma_drm.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
#include "psb_drv.h"
+#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#define _wait_for(COND, MS, W) ({ \
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
index 7f1d4a514aef..de8810188190 100644
--- a/drivers/gpu/drm/gma500/intel_i2c.c
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -5,9 +5,11 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
+
+#include <linux/delay.h>
#include <linux/export.h>
-#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index 0db869dcd7bd..b718efccdcf2 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -5,14 +5,16 @@
*
**************************************************************************/
-#include "psb_drv.h"
-#include "mid_bios.h"
-#include "mdfld_output.h"
-#include "mdfld_dsi_output.h"
-#include "tc35876x-dsi-lvds.h"
+#include <linux/delay.h>
#include <asm/intel_scu_ipc.h>
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+#include "mid_bios.h"
+#include "psb_drv.h"
+#include "tc35876x-dsi-lvds.h"
+
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
@@ -330,7 +332,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
if (pipenum == 1) {
/* restore palette (gamma) */
- /*DRM_UDELAY(50000); */
+ /* udelay(50000); */
for (i = 0; i < 256; i++)
PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
@@ -392,7 +394,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
PSB_WVDC32(pipe->conf, map->conf);
/* restore palette (gamma) */
- /*DRM_UDELAY(50000); */
+ /* udelay(50000); */
for (i = 0; i < 256; i++)
PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d0bf5a1e94e8..d4c65f268922 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -25,9 +25,11 @@
* Jackie Li<yaodong.li@intel.com>
*/
+#include <linux/delay.h>
+
#include "mdfld_dsi_dpi.h"
-#include "mdfld_output.h"
#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_output.h"
#include "psb_drv.h"
#include "tc35876x-dsi-lvds.h"
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index fe020926ea4f..03023fa0fb6f 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -25,15 +25,17 @@
* Jackie Li<yaodong.li@intel.com>
*/
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/intel_scu_ipc.h>
-#include "mdfld_dsi_output.h"
#include "mdfld_dsi_dpi.h"
-#include "mdfld_output.h"
+#include "mdfld_dsi_output.h"
#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_output.h"
#include "tc35876x-dsi-lvds.h"
-#include <linux/pm_runtime.h>
-#include <asm/intel_scu_ipc.h>
/* get the LABC from command line. */
static int LABC_control = 1;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 5b646c1f0c3e..0cccfe400a98 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -29,17 +29,17 @@
#define __MDFLD_DSI_OUTPUT_H__
#include <linux/backlight.h>
-#include <drm/drmP.h>
+
+#include <asm/intel-mid.h>
+
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include "mdfld_output.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "mdfld_output.h"
-
-#include <asm/intel-mid.h>
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index c50534c923df..6e0de83e9f7d 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -24,12 +24,14 @@
* Jackie Li<yaodong.li@intel.com>
*/
+#include <linux/delay.h>
#include <linux/freezer.h>
+
#include <video/mipi_display.h>
+#include "mdfld_dsi_dpi.h"
#include "mdfld_dsi_output.h"
#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_dsi_dpi.h"
#define MDFLD_DSI_READ_MAX_COUNT 5000
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 6a3c612c83ab..b8bfb96008b8 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -6,15 +6,18 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
-#include <drm/drmP.h>
-#include "psb_intel_reg.h"
-#include "gma_display.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+
#include "framebuffer.h"
-#include "mdfld_output.h"
+#include "gma_display.h"
#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+#include "psb_intel_reg.h"
/* Hardcoded currently */
static int ksel = KSEL_CRYSTAL_19;
diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
index dc0c6c3d3d29..49c92debb7b2 100644
--- a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
+++ b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
@@ -27,6 +27,8 @@
* Scott Rowe <scott.m.rowe@intel.com>
*/
+#include <linux/delay.h>
+
#include "mdfld_dsi_dpi.h"
#include "mdfld_dsi_pkg_sender.h"
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 3a12fef339bf..8ab44fec4bfa 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -11,11 +11,10 @@
* - Check ioremap failures
*/
-#include <drm/drmP.h>
#include <drm/drm.h>
-#include <drm/gma_drm.h>
-#include "psb_drv.h"
+
#include "mid_bios.h"
+#include "psb_drv.h"
static void mid_get_fuse_settings(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/gma500/mid_bios.h b/drivers/gpu/drm/gma500/mid_bios.h
index 7e743f731a92..8707f7c893a7 100644
--- a/drivers/gpu/drm/gma500/mid_bios.h
+++ b/drivers/gpu/drm/gma500/mid_bios.h
@@ -4,6 +4,7 @@
* All Rights Reserved.
*
**************************************************************************/
+struct drm_device;
extern int mid_chip_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 5d806a8ff1bd..505044c9a673 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -3,10 +3,12 @@
* Copyright (c) 2007, Intel Corporation.
*
**************************************************************************/
-#include <drm/drmP.h>
+
+#include <linux/highmem.h>
+
+#include "mmu.h"
#include "psb_drv.h"
#include "psb_reg.h"
-#include "mmu.h"
/*
* Code for the SGX MMU:
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index 72da4e0bc8c7..8d20fa2ee286 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -5,6 +5,8 @@
*
**************************************************************************/
+struct psb_intel_mode_device;
+
/* MID device specific descriptors */
struct oaktrail_timing_info {
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index cb4dafd113b3..167c10767dd4 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -3,16 +3,18 @@
* Copyright © 2009 Intel Corporation
*/
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
-#include <drm/drmP.h>
+#include <drm/drm_fourcc.h>
+
#include "framebuffer.h"
+#include "gma_display.h"
+#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "gma_display.h"
-#include "power.h"
#define MRST_LIMIT_LVDS_100L 0
#define MRST_LIMIT_LVDS_83 1
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index e35e99508a70..ade7e2416a66 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -6,18 +6,20 @@
**************************************************************************/
#include <linux/backlight.h>
-#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/dmi.h>
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include <drm/gma_drm.h>
-#include "psb_drv.h"
-#include "psb_reg.h"
-#include "psb_intel_reg.h"
+#include <linux/module.h>
+
#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
-#include "mid_bios.h"
+
+#include <drm/drm.h>
+
#include "intel_bios.h"
+#include "mid_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_reg.h"
static int oaktrail_output_init(struct drm_device *dev)
{
@@ -315,7 +317,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
/* Actually enable it */
PSB_WVDC32(p->dpll, MRST_DPLL_A);
- DRM_UDELAY(150);
+ udelay(150);
/* Restore mode */
PSB_WVDC32(p->htotal, HTOTAL_A);
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index c6d72de1c054..f4c520893ceb 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -24,11 +24,13 @@
* Li Peng <peng.li@intel.com>
*/
-#include <drm/drmP.h>
+#include <linux/delay.h>
+
#include <drm/drm.h>
+
+#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_drv.h"
#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
@@ -815,7 +817,7 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
- DRM_UDELAY(150);
+ udelay(150);
/* pipe */
PSB_WVDC32(pipeb->src, PIPEBSRC);
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 56e652664ae2..7390403ea1b7 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -9,15 +9,15 @@
*/
#include <linux/i2c.h>
-#include <drm/drmP.h>
+#include <linux/pm_runtime.h>
+
#include <asm/intel-mid.h>
#include "intel_bios.h"
+#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "power.h"
-#include <linux/pm_runtime.h>
/* The max/min PWM frequency in BPCR[31:17] - */
/* The smallest number is 1 (not 0) that can fit in the
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
index f913a62eee5f..baaf8212e01d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -23,17 +23,16 @@
*
*/
+#include <linux/delay.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
index 56d8708bd41c..0c89c4d6ec20 100644
--- a/drivers/gpu/drm/gma500/power.h
+++ b/drivers/gpu/drm/gma500/power.h
@@ -31,7 +31,9 @@
#define _PSB_POWERMGMT_H_
#include <linux/pci.h>
-#include <drm/drmP.h>
+
+struct device;
+struct drm_device;
void gma_power_init(struct drm_device *dev);
void gma_power_uninit(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 649b0282bff8..ece994c4c21a 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -6,15 +6,15 @@
**************************************************************************/
#include <linux/backlight.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
-#include <drm/gma_drm.h>
-#include "psb_drv.h"
-#include "psb_reg.h"
-#include "psb_intel_reg.h"
+
+#include "gma_device.h"
#include "intel_bios.h"
#include "psb_device.h"
-#include "gma_device.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_reg.h"
static int psb_output_init(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 5280be2c2500..7005f8f69c68 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -7,23 +7,32 @@
*
**************************************************************************/
-#include <drm/drmP.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+
+#include <asm/set_memory.h>
+
+#include <acpi/video.h>
+
#include <drm/drm.h>
-#include "psb_drv.h"
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_pciids.h>
+#include <drm/drm_vblank.h>
+
#include "framebuffer.h"
-#include "psb_reg.h"
-#include "psb_intel_reg.h"
#include "intel_bios.h"
#include "mid_bios.h"
-#include <drm/drm_pciids.h>
#include "power.h"
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-#include <linux/spinlock.h>
-#include <linux/pm_runtime.h>
-#include <acpi/video.h>
-#include <linux/module.h>
-#include <asm/set_memory.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_reg.h"
static struct drm_driver driver;
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index e0eec6d2b63c..9b3c03f4a38d 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -11,17 +11,17 @@
#include <linux/kref.h>
#include <linux/mm_types.h>
-#include <drm/drmP.h>
-#include <drm/gma_drm.h>
-#include "psb_reg.h"
-#include "psb_intel_drv.h"
+#include <drm/drm_device.h>
+
#include "gma_display.h"
-#include "intel_bios.h"
#include "gtt.h"
-#include "power.h"
-#include "opregion.h"
-#include "oaktrail.h"
+#include "intel_bios.h"
#include "mmu.h"
+#include "oaktrail.h"
+#include "opregion.h"
+#include "power.h"
+#include "psb_intel_drv.h"
+#include "psb_reg.h"
#define DRIVER_AUTHOR "Alan Cox <alan@linux.intel.com> and others"
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 3dcf3a32c6ca..4256410535f0 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -6,16 +6,17 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/delay.h>
#include <linux/i2c.h>
-#include <drm/drmP.h>
#include <drm/drm_plane_helper.h>
+
#include "framebuffer.h"
+#include "gma_display.h"
+#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "gma_display.h"
-#include "power.h"
#define INTEL_LIMIT_I9XX_SDVO_DAC 0
#define INTEL_LIMIT_I9XX_LVDS 1
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index eaacacab1b46..afaebab7bc17 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -9,14 +9,13 @@
*/
#include <linux/i2c.h>
-#include <drm/drmP.h>
+#include <linux/pm_runtime.h>
#include "intel_bios.h"
+#include "power.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "power.h"
-#include <linux/pm_runtime.h>
/*
* LVDS I2C backlight control macros
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 4831caedbed8..88653a40aeb5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -6,7 +6,7 @@
*/
#include <linux/i2c.h>
-#include <drm/drmP.h>
+
#include "psb_intel_drv.h"
/**
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index dd3cec0e3190..264d7ad004b4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -25,19 +25,20 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/module.h>
+
+#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/delay.h>
-#include <drm/drmP.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "psb_intel_drv.h"
-#include <drm/gma_drm.h>
+
#include "psb_drv.h"
-#include "psb_intel_sdvo_regs.h"
+#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include <linux/kernel.h>
+#include "psb_intel_sdvo_regs.h"
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 5d48703bb2e2..e6265fb85626 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -8,13 +8,14 @@
*
**************************************************************************/
-#include <drm/drmP.h>
+#include <drm/drm_vblank.h>
+
+#include "mdfld_output.h"
+#include "power.h"
#include "psb_drv.h"
-#include "psb_reg.h"
#include "psb_intel_reg.h"
-#include "power.h"
#include "psb_irq.h"
-#include "mdfld_output.h"
+#include "psb_reg.h"
/*
* inline functions
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 604cba75528b..58fd502e3b9d 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -12,7 +12,7 @@
#ifndef _PSB_IRQ_H_
#define _PSB_IRQ_H_
-#include <drm/drmP.h>
+struct drm_device;
bool sysirq_init(struct drm_device *dev);
void sysirq_uninit(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index 5a9cc1721e2d..97b0c52bfd8a 100644
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -5,11 +5,11 @@
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
**************************************************************************/
-#include <drm/drmP.h>
+#include <linux/spinlock.h>
+
#include "psb_drv.h"
-#include "psb_reg.h"
#include "psb_intel_reg.h"
-#include <linux/spinlock.h>
+#include "psb_reg.h"
static void psb_lid_timer_func(struct timer_list *t)
{
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 37c997e24b9e..7de3ce637c7f 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -22,15 +22,18 @@
*
*/
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_output.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "tc35876x-dsi-lvds.h"
-#include <linux/platform_data/tc35876x.h>
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_data/tc35876x.h>
+
#include <asm/intel_scu_ipc.h>
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_output.h"
+#include "tc35876x-dsi-lvds.h"
+
static struct i2c_client *tc35876x_client;
static struct i2c_client *cmi_lcd_i2c_client;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 7cf8d38da8be..f20eedf0073a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -3,7 +3,7 @@ config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_VRAM_HELPER
help
Choose this option if you have a Hisilicon Hibmc SoC chipset.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 0a753d397d7d..08657a3627f3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -91,27 +91,26 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *state = plane->state;
u32 reg;
int ret;
- u64 gpu_addr = 0;
+ s64 gpu_addr = 0;
unsigned int line_l;
struct hibmc_drm_private *priv = plane->dev->dev_private;
struct hibmc_framebuffer *hibmc_fb;
- struct hibmc_bo *bo;
+ struct drm_gem_vram_object *gbo;
if (!state->fb)
return;
hibmc_fb = to_hibmc_framebuffer(state->fb);
- bo = gem_to_hibmc_bo(hibmc_fb->obj);
- ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+ gbo = drm_gem_vram_of_gem(hibmc_fb->obj);
+
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret) {
- DRM_ERROR("failed to reserve ttm_bo: %d", ret);
+ DRM_ERROR("failed to pin bo: %d", ret);
return;
}
-
- ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- ttm_bo_unreserve(&bo->bo);
- if (ret) {
- DRM_ERROR("failed to pin hibmc_bo: %d", ret);
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (gpu_addr < 0) {
+ drm_gem_vram_unpin(gbo);
return;
}
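The hunk above replaces the open-coded ttm_bo_reserve()/hibmc_bo_pin() sequence with the generic VRAM helpers: drm_gem_vram_pin() places and pins the buffer in VRAM, and drm_gem_vram_offset() then reports its GPU address, returning a negative errno (hence the s64) if the object is not pinned. A minimal sketch of that pairing, assuming <drm/drm_gem_vram_helper.h> (the wrapper function is illustrative):

	static int example_pin_and_query(struct drm_gem_vram_object *gbo,
					 s64 *gpu_addr)
	{
		s64 off;
		int ret;

		ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
		if (ret)
			return ret;

		off = drm_gem_vram_offset(gbo); /* valid only while pinned */
		if (off < 0) {
			drm_gem_vram_unpin(gbo);
			return (int)off;
		}

		*gpu_addr = off;
		return 0;
	}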
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index d14ce808d1d0..ce89e56937b0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -22,14 +22,7 @@
static const struct file_operations hibmc_fops = {
.owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .mmap = hibmc_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = no_llseek,
+ DRM_VRAM_MM_FILE_OPERATIONS
};
static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
@@ -58,9 +51,10 @@ static struct drm_driver hibmc_driver = {
.desc = "hibmc drm driver",
.major = 1,
.minor = 0,
- .gem_free_object_unlocked = hibmc_gem_free_object,
+ .gem_free_object_unlocked =
+ drm_gem_vram_driver_gem_free_object_unlocked,
.dumb_create = hibmc_dumb_create,
- .dumb_map_offset = hibmc_dumb_mmap_offset,
+ .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
.irq_handler = hibmc_drm_interrupt,
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 740c64332837..69348bf54a84 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -18,7 +18,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_vram_mm_helper.h>
struct hibmc_framebuffer {
struct drm_framebuffer fb;
@@ -43,36 +44,12 @@ struct hibmc_drm_private {
struct drm_device *dev;
bool mode_config_initialized;
- /* ttm */
- struct ttm_bo_device bdev;
- bool initialized;
-
/* fbdev */
struct hibmc_fbdev *fbdev;
- bool mm_inited;
};
#define to_hibmc_framebuffer(x) container_of(x, struct hibmc_framebuffer, fb)
-struct hibmc_bo {
- struct ttm_buffer_object bo;
- struct ttm_placement placement;
- struct ttm_bo_kmap_obj kmap;
- struct drm_gem_object gem;
- struct ttm_place placements[3];
- int pin_count;
-};
-
-static inline struct hibmc_bo *hibmc_bo(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct hibmc_bo, bo);
-}
-
-static inline struct hibmc_bo *gem_to_hibmc_bo(struct drm_gem_object *gem)
-{
- return container_of(gem, struct hibmc_bo, gem);
-}
-
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
unsigned int power_mode);
void hibmc_set_current_gate(struct hibmc_drm_private *priv,
@@ -92,14 +69,8 @@ hibmc_framebuffer_init(struct drm_device *dev,
int hibmc_mm_init(struct hibmc_drm_private *hibmc);
void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
-int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr);
-int hibmc_bo_unpin(struct hibmc_bo *bo);
-void hibmc_gem_free_object(struct drm_gem_object *obj);
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
- u32 handle, u64 *offset);
-int hibmc_mmap(struct file *filp, struct vm_area_struct *vma);
extern const struct drm_mode_config_funcs hibmc_mode_funcs;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index 498545417717..af1ea4cceffa 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -58,10 +58,10 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
int ret = 0;
- int ret1;
size_t size;
unsigned int bytes_per_pixel;
- struct hibmc_bo *bo = NULL;
+ struct drm_gem_vram_object *gbo = NULL;
+ void *base;
DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
@@ -83,26 +83,20 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
return -ENOMEM;
}
- bo = gem_to_hibmc_bo(gobj);
+ gbo = drm_gem_vram_of_gem(gobj);
- ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
- if (ret) {
- DRM_ERROR("failed to reserve ttm_bo: %d\n", ret);
- goto out_unref_gem;
- }
-
- ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret) {
DRM_ERROR("failed to pin fbcon: %d\n", ret);
- goto out_unreserve_ttm_bo;
+ goto out_unref_gem;
}
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret) {
+ base = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
DRM_ERROR("failed to kmap fbcon: %d\n", ret);
goto out_unpin_bo;
}
- ttm_bo_unreserve(&bo->bo);
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
@@ -126,24 +120,17 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(info, &priv->fbdev->helper, sizes);
- info->screen_base = bo->kmap.virtual;
+ info->screen_base = base;
info->screen_size = size;
- info->fix.smem_start = bo->bo.mem.bus.offset + bo->bo.mem.bus.base;
+ info->fix.smem_start = gbo->bo.mem.bus.offset + gbo->bo.mem.bus.base;
info->fix.smem_len = size;
return 0;
out_release_fbi:
- ret1 = ttm_bo_reserve(&bo->bo, true, false, NULL);
- if (ret1) {
- DRM_ERROR("failed to rsv ttm_bo when release fbi: %d\n", ret1);
- goto out_unref_gem;
- }
- ttm_bo_kunmap(&bo->kmap);
+ drm_gem_vram_kunmap(gbo);
out_unpin_bo:
- hibmc_bo_unpin(bo);
-out_unreserve_ttm_bo:
- ttm_bo_unreserve(&bo->bo);
+ drm_gem_vram_unpin(gbo);
out_unref_gem:
drm_gem_object_put_unlocked(gobj);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 1ee339e0e1db..5d4a03cd7d50 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -12,335 +12,55 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include "hibmc_drm_drv.h"
-static inline struct hibmc_drm_private *
-hibmc_bdev(struct ttm_bo_device *bd)
-{
- return container_of(bd, struct hibmc_drm_private, bdev);
-}
-
-static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
-{
- struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
-
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static bool hibmc_ttm_bo_is_hibmc_bo(struct ttm_buffer_object *bo)
-{
- return bo->destroy == &hibmc_bo_ttm_destroy;
-}
-
-static int
-hibmc_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- DRM_ERROR("unsupported memory type %u\n", type);
- return -EINVAL;
- }
- return 0;
-}
-
-void hibmc_ttm_placement(struct hibmc_bo *bo, int domain)
-{
- u32 count = 0;
- u32 i;
-
- bo->placement.placement = bo->placements;
- bo->placement.busy_placement = bo->placements;
- if (domain & TTM_PL_FLAG_VRAM)
- bo->placements[count++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
- if (domain & TTM_PL_FLAG_SYSTEM)
- bo->placements[count++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
- if (!count)
- bo->placements[count++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
-
- bo->placement.num_placement = count;
- bo->placement.num_busy_placement = count;
- for (i = 0; i < count; i++) {
- bo->placements[i].fpfn = 0;
- bo->placements[i].lpfn = 0;
- }
-}
-
-static void
-hibmc_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
-{
- struct hibmc_bo *hibmcbo = hibmc_bo(bo);
-
- if (!hibmc_ttm_bo_is_hibmc_bo(bo))
- return;
-
- hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_SYSTEM);
- *pl = hibmcbo->placement;
-}
-
-static int hibmc_bo_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- struct hibmc_bo *hibmcbo = hibmc_bo(bo);
-
- return drm_vma_node_verify_access(&hibmcbo->gem.vma_node,
- filp->private_data);
-}
-
-static int hibmc_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct hibmc_drm_private *hibmc = hibmc_bdev(bdev);
-
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- /* system memory */
- return 0;
- case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = pci_resource_start(hibmc->dev->pdev, 0);
- mem->bus.is_iomem = true;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void hibmc_ttm_backend_destroy(struct ttm_tt *tt)
-{
- ttm_tt_fini(tt);
- kfree(tt);
-}
-
-static struct ttm_backend_func hibmc_tt_backend_func = {
- .destroy = &hibmc_ttm_backend_destroy,
-};
-
-static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_buffer_object *bo,
- u32 page_flags)
-{
- struct ttm_tt *tt;
- int ret;
-
- tt = kzalloc(sizeof(*tt), GFP_KERNEL);
- if (!tt) {
- DRM_ERROR("failed to allocate ttm_tt\n");
- return NULL;
- }
- tt->func = &hibmc_tt_backend_func;
- ret = ttm_tt_init(tt, bo, page_flags);
- if (ret) {
- DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
- kfree(tt);
- return NULL;
- }
- return tt;
-}
-
-struct ttm_bo_driver hibmc_bo_driver = {
- .ttm_tt_create = hibmc_ttm_tt_create,
- .init_mem_type = hibmc_bo_init_mem_type,
- .evict_flags = hibmc_bo_evict_flags,
- .move = NULL,
- .verify_access = hibmc_bo_verify_access,
- .io_mem_reserve = &hibmc_ttm_io_mem_reserve,
- .io_mem_free = NULL,
-};
-
int hibmc_mm_init(struct hibmc_drm_private *hibmc)
{
+ struct drm_vram_mm *vmm;
int ret;
struct drm_device *dev = hibmc->dev;
- struct ttm_bo_device *bdev = &hibmc->bdev;
- ret = ttm_bo_device_init(&hibmc->bdev,
- &hibmc_bo_driver,
- dev->anon_inode->i_mapping,
- true);
- if (ret) {
- DRM_ERROR("error initializing bo driver: %d\n", ret);
+ vmm = drm_vram_helper_alloc_mm(dev,
+ pci_resource_start(dev->pdev, 0),
+ hibmc->fb_size, &drm_gem_vram_mm_funcs);
+ if (IS_ERR(vmm)) {
+ ret = PTR_ERR(vmm);
+ DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
return ret;
}
- ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
- hibmc->fb_size >> PAGE_SHIFT);
- if (ret) {
- DRM_ERROR("failed ttm VRAM init: %d\n", ret);
- return ret;
- }
-
- hibmc->mm_inited = true;
return 0;
}
void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
{
- if (!hibmc->mm_inited)
- return;
-
- ttm_bo_device_release(&hibmc->bdev);
- hibmc->mm_inited = false;
-}
-
-static void hibmc_bo_unref(struct hibmc_bo **bo)
-{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
+ if (!hibmc->dev->vram_mm)
return;
- tbo = &((*bo)->bo);
- ttm_bo_put(tbo);
- *bo = NULL;
-}
-
-int hibmc_bo_create(struct drm_device *dev, int size, int align,
- u32 flags, struct hibmc_bo **phibmcbo)
-{
- struct hibmc_drm_private *hibmc = dev->dev_private;
- struct hibmc_bo *hibmcbo;
- size_t acc_size;
- int ret;
-
- hibmcbo = kzalloc(sizeof(*hibmcbo), GFP_KERNEL);
- if (!hibmcbo) {
- DRM_ERROR("failed to allocate hibmcbo\n");
- return -ENOMEM;
- }
- ret = drm_gem_object_init(dev, &hibmcbo->gem, size);
- if (ret) {
- DRM_ERROR("failed to initialize drm gem object: %d\n", ret);
- kfree(hibmcbo);
- return ret;
- }
-
- hibmcbo->bo.bdev = &hibmc->bdev;
-
- hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
-
- acc_size = ttm_bo_dma_acc_size(&hibmc->bdev, size,
- sizeof(struct hibmc_bo));
-
- ret = ttm_bo_init(&hibmc->bdev, &hibmcbo->bo, size,
- ttm_bo_type_device, &hibmcbo->placement,
- align >> PAGE_SHIFT, false, acc_size,
- NULL, NULL, hibmc_bo_ttm_destroy);
- if (ret) {
- hibmc_bo_unref(&hibmcbo);
- DRM_ERROR("failed to initialize ttm_bo: %d\n", ret);
- return ret;
- }
-
- *phibmcbo = hibmcbo;
- return 0;
-}
-
-int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- if (bo->pin_count) {
- bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = bo->bo.offset;
- return 0;
- }
-
- hibmc_ttm_placement(bo, pl_flag);
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret)
- return ret;
-
- bo->pin_count = 1;
- if (gpu_addr)
- *gpu_addr = bo->bo.offset;
- return 0;
-}
-
-int hibmc_bo_unpin(struct hibmc_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
-
- for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret) {
- DRM_ERROR("validate failed for unpin: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-int hibmc_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct hibmc_drm_private *hibmc = file_priv->minor->dev->dev_private;
-
- return ttm_bo_mmap(filp, vma, &hibmc->bdev);
+ drm_vram_helper_release_mm(hibmc->dev);
}
int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
struct drm_gem_object **obj)
{
- struct hibmc_bo *hibmcbo;
+ struct drm_gem_vram_object *gbo;
int ret;
*obj = NULL;
- size = PAGE_ALIGN(size);
- if (size == 0) {
- DRM_ERROR("error: zero size\n");
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
return -EINVAL;
- }
- ret = hibmc_bo_create(dev, size, 0, 0, &hibmcbo);
- if (ret) {
+ gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
if (ret != -ERESTARTSYS)
DRM_ERROR("failed to allocate GEM object: %d\n", ret);
return ret;
}
- *obj = &hibmcbo->gem;
+ *obj = &gbo->gem;
return 0;
}
@@ -372,35 +92,6 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
return 0;
}
-void hibmc_gem_free_object(struct drm_gem_object *obj)
-{
- struct hibmc_bo *hibmcbo = gem_to_hibmc_bo(obj);
-
- hibmc_bo_unref(&hibmcbo);
-}
-
-static u64 hibmc_bo_mmap_offset(struct hibmc_bo *bo)
-{
- return drm_vma_node_offset_addr(&bo->bo.vma_node);
-}
-
-int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
- u32 handle, u64 *offset)
-{
- struct drm_gem_object *obj;
- struct hibmc_bo *bo;
-
- obj = drm_gem_object_lookup(file, handle);
- if (!obj)
- return -ENOENT;
-
- bo = gem_to_hibmc_bo(obj);
- *offset = hibmc_bo_mmap_offset(bo);
-
- drm_gem_object_put_unlocked(obj);
- return 0;
-}
-
static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 255f224db64b..978cb39a47a8 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -133,3 +133,9 @@ depends on DRM_I915
depends on EXPERT
source "drivers/gpu/drm/i915/Kconfig.debug"
endmenu
+
+menu "drm/i915 Profile Guided Optimisation"
+ visible if EXPERT
+ depends on DRM_I915
+ source "drivers/gpu/drm/i915/Kconfig.profile"
+endmenu
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
new file mode 100644
index 000000000000..0e5db98da8f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -0,0 +1,13 @@
+config DRM_I915_SPIN_REQUEST
+ int
+ default 5 # microseconds
+ help
+ Before sleeping waiting for a request (GPU operation) to complete,
+ we may spend some time polling for its completion. As the IRQ may
+ take a non-negligible time to setup, we do a short spin first to
+ check if the request will complete in the time it would have taken
+ us to enable the interrupt.
+
+ May be 0 to disable the initial spin. In practice, we estimate
+ the cost of enabling the interrupt (if currently disabled) to be
+ a few microseconds.
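To make the help text concrete: the option sizes a bounded busy-wait that runs before the driver falls back to an interrupt-driven sleep. A hedged sketch of the shape being tuned (request_completed() and sleep_until_signaled() are illustrative stand-ins, not i915 functions):

	static bool example_wait(struct i915_request *rq, unsigned int spin_us)
	{
		ktime_t end = ktime_add_us(ktime_get(), spin_us);

		/*
		 * Poll briefly: cheaper than programming the IRQ if the
		 * GPU is nearly done. spin_us == 0 skips the poll.
		 */
		while (ktime_before(ktime_get(), end)) {
			if (request_completed(rq))
				return true;
			cpu_relax();
		}

		/* Not done in time: enable the interrupt and sleep. */
		return sleep_until_signaled(rq);
	}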
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fbcb0904f4a8..68106fe35a04 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -35,32 +35,56 @@ subdir-ccflags-y += \
# Extra header tests
include $(src)/Makefile.header-test
+subdir-ccflags-y += -I$(src)
+
# Please keep these build lists sorted!
# core driver code
i915-y += i915_drv.o \
i915_irq.o \
- i915_memcpy.o \
- i915_mm.o \
i915_params.o \
i915_pci.o \
- i915_reset.o \
i915_suspend.o \
- i915_sw_fence.o \
- i915_syncmap.o \
i915_sysfs.o \
- i915_user_extensions.o \
intel_csr.o \
intel_device_info.o \
intel_pm.o \
intel_runtime_pm.o \
- intel_workarounds.o
+ intel_wakeref.o \
+ intel_uncore.o
+
+# core library code
+i915-y += \
+ i915_memcpy.o \
+ i915_mm.o \
+ i915_sw_fence.o \
+ i915_syncmap.o \
+ i915_user_extensions.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
-# GEM code
+# "Graphics Technology" (aka we talk to the gpu)
+obj-y += gt/
+gt-y += \
+ gt/intel_breadcrumbs.o \
+ gt/intel_context.o \
+ gt/intel_engine_cs.o \
+ gt/intel_engine_pm.o \
+ gt/intel_gt_pm.o \
+ gt/intel_hangcheck.o \
+ gt/intel_lrc.o \
+ gt/intel_reset.o \
+ gt/intel_ringbuffer.o \
+ gt/intel_mocs.o \
+ gt/intel_sseu.o \
+ gt/intel_workarounds.o
+gt-$(CONFIG_DRM_I915_SELFTEST) += \
+ gt/mock_engine.o
+i915-y += $(gt-y)
+
+# GEM (Graphics Execution Management) code
i915-y += \
i915_active.o \
i915_cmd_parser.o \
@@ -75,6 +99,7 @@ i915-y += \
i915_gem_internal.o \
i915_gem.o \
i915_gem_object.o \
+ i915_gem_pm.o \
i915_gem_render_state.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
@@ -88,14 +113,6 @@ i915-y += \
i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
- intel_breadcrumbs.o \
- intel_context.o \
- intel_engine_cs.o \
- intel_hangcheck.o \
- intel_lrc.o \
- intel_mocs.o \
- intel_ringbuffer.o \
- intel_uncore.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -159,8 +176,8 @@ i915-y += dvo_ch7017.o \
intel_dsi_dcs_backlight.o \
intel_dsi_vbt.o \
intel_dvo.o \
+ intel_gmbus.o \
intel_hdmi.o \
- intel_i2c.o \
intel_lspcon.o \
intel_lvds.o \
intel_panel.o \
@@ -176,6 +193,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o \
selftests/igt_flush_test.o \
+ selftests/igt_gem_utils.o \
selftests/igt_live_test.o \
selftests/igt_reset.o \
selftests/igt_spinner.o
diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test
index c1c391816fa7..3a9663002d4a 100644
--- a/drivers/gpu/drm/i915/Makefile.header-test
+++ b/drivers/gpu/drm/i915/Makefile.header-test
@@ -4,37 +4,65 @@
# Test the headers are compilable as standalone units
header_test := \
i915_active_types.h \
+ i915_debugfs.h \
+ i915_drv.h \
i915_gem_context_types.h \
+ i915_gem_pm.h \
+ i915_irq.h \
+ i915_params.h \
i915_priolist_types.h \
+ i915_reg.h \
i915_scheduler_types.h \
i915_timeline_types.h \
+ i915_utils.h \
+ intel_acpi.h \
+ intel_atomic.h \
intel_atomic_plane.h \
intel_audio.h \
+ intel_bios.h \
intel_cdclk.h \
intel_color.h \
+ intel_combo_phy.h \
intel_connector.h \
- intel_context_types.h \
intel_crt.h \
intel_csr.h \
intel_ddi.h \
intel_dp.h \
+ intel_dp_aux_backlight.h \
+ intel_dp_link_training.h \
+ intel_dp_mst.h \
+ intel_dpio_phy.h \
+ intel_dpll_mgr.h \
+ intel_drv.h \
+ intel_dsi.h \
+ intel_dsi_dcs_backlight.h \
intel_dvo.h \
- intel_engine_types.h \
+ intel_dvo_dev.h \
intel_fbc.h \
intel_fbdev.h \
+ intel_fifo_underrun.h \
intel_frontbuffer.h \
+ intel_gmbus.h \
intel_hdcp.h \
intel_hdmi.h \
+ intel_hotplug.h \
+ intel_lpe_audio.h \
intel_lspcon.h \
intel_lvds.h \
+ intel_overlay.h \
intel_panel.h \
intel_pipe_crc.h \
intel_pm.h \
intel_psr.h \
+ intel_quirks.h \
+ intel_runtime_pm.h \
intel_sdvo.h \
+ intel_sideband.h \
intel_sprite.h \
intel_tv.h \
- intel_workarounds_types.h
+ intel_uncore.h \
+ intel_vdsc.h \
+ intel_wakeref.h
quiet_cmd_header_test = HDRTEST $@
cmd_header_test = echo "\#include \"$(<F)\"" > $@
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index caac9942e1e3..602380fe74f3 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -25,7 +25,8 @@
*
*/
-#include "dvo.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
#define CH7017_TV_DISPLAY_MODE 0x00
#define CH7017_FLICKER_FILTER 0x01
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 397ac5233726..e070bebee7b5 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -26,7 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
-#include "dvo.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
#define CH7xxx_REG_VID 0x4a
#define CH7xxx_REG_DID 0x4b
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 24278cc49090..09dba35f3ffa 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -29,7 +29,8 @@
*
*/
-#include "dvo.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
/*
* register definitions for the i82807aa.
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index c584e01dc8dc..c83a5d88d62b 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -26,9 +26,10 @@
*
*/
-#include "dvo.h"
-#include "i915_reg.h"
#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
#define NS2501_VID 0x1305
#define NS2501_DID 0x6726
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 4ae5d8fd9ff0..04698eaeb632 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -26,7 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
-#include "dvo.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
#define SIL164_VID 0x0001
#define SIL164_DID 0x0006
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index d603bc2f2506..623114ee73cd 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -25,7 +25,8 @@
*
*/
-#include "dvo.h"
+#include "intel_drv.h"
+#include "intel_dvo_dev.h"
/* register definitions according to the TFP410 data sheet */
#define TFP410_VID 0x014C
diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile
new file mode 100644
index 000000000000..1c75b5c9790c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/Makefile
@@ -0,0 +1,2 @@
+# Extra header tests
+include $(src)/Makefile.header-test
diff --git a/drivers/gpu/drm/i915/gt/Makefile.header-test b/drivers/gpu/drm/i915/gt/Makefile.header-test
new file mode 100644
index 000000000000..61e06cbb4b32
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/Makefile.header-test
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := $(notdir $(wildcard $(src)/*.h))
+
+quiet_cmd_header_test = HDRTEST $@
+ cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+ $(call cmd,header_test)
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+ $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 832cb6b1e9bd..c092bdf5f0bf 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -81,6 +81,22 @@ static inline bool __request_completed(const struct i915_request *rq)
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
+__maybe_unused static bool
+check_signal_order(struct intel_context *ce, struct i915_request *rq)
+{
+ if (!list_is_last(&rq->signal_link, &ce->signals) &&
+ i915_seqno_passed(rq->fence.seqno,
+ list_next_entry(rq, signal_link)->fence.seqno))
+ return false;
+
+ if (!list_is_first(&rq->signal_link, &ce->signals) &&
+ i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
+ rq->fence.seqno))
+ return false;
+
+ return true;
+}
+
static bool
__dma_fence_signal(struct dma_fence *fence)
{
@@ -130,6 +146,8 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
+ GEM_BUG_ON(!check_signal_order(ce, rq));
+
if (!__request_completed(rq))
break;
@@ -312,6 +330,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
list_add(&rq->signal_link, pos);
if (pos == &ce->signals) /* catch transitions from empty list */
list_move_tail(&ce->signal_link, &b->signalers);
+ GEM_BUG_ON(!check_signal_order(ce, rq));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
spin_unlock(&b->irq_lock);
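The GEM_BUG_ON()s added above assert that each context's signal list stays sorted by sequence number, using i915_seqno_passed() for the comparisons. That helper is not shown in this hunk; the usual wraparound-safe idiom it relies on looks like this (name illustrative):

	/*
	 * Treat the unsigned difference as signed so that seqnos which
	 * have wrapped past zero still compare in submission order.
	 */
	static inline bool example_seqno_passed(u32 seq1, u32 seq2)
	{
		return (s32)(seq1 - seq2) >= 0;
	}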
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
new file mode 100644
index 000000000000..5b31e1e05ddd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -0,0 +1,179 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_context.h"
+#include "i915_globals.h"
+
+#include "intel_context.h"
+#include "intel_engine.h"
+#include "intel_engine_pm.h"
+
+static struct i915_global_context {
+ struct i915_global base;
+ struct kmem_cache *slab_ce;
+} global;
+
+static struct intel_context *intel_context_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
+}
+
+void intel_context_free(struct intel_context *ce)
+{
+ kmem_cache_free(global.slab_ce, ce);
+}
+
+struct intel_context *
+intel_context_create(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_alloc();
+ if (!ce)
+ return ERR_PTR(-ENOMEM);
+
+ intel_context_init(ce, ctx, engine);
+ return ce;
+}
+
+int __intel_context_do_pin(struct intel_context *ce)
+{
+ int err;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex))
+ return -EINTR;
+
+ if (likely(!atomic_read(&ce->pin_count))) {
+ intel_wakeref_t wakeref;
+
+ err = 0;
+ with_intel_runtime_pm(ce->engine->i915, wakeref)
+ err = ce->ops->pin(ce);
+ if (err)
+ goto err;
+
+ i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
+
+ intel_context_get(ce);
+ smp_mb__before_atomic(); /* flush pin before it is visible */
+ }
+
+ atomic_inc(&ce->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
+
+ mutex_unlock(&ce->pin_mutex);
+ return 0;
+
+err:
+ mutex_unlock(&ce->pin_mutex);
+ return err;
+}
+
+void intel_context_unpin(struct intel_context *ce)
+{
+ if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+ return;
+
+ /* We may be called from inside intel_context_pin() to evict another */
+ intel_context_get(ce);
+ mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
+
+ if (likely(atomic_dec_and_test(&ce->pin_count))) {
+ ce->ops->unpin(ce);
+
+ i915_gem_context_put(ce->gem_context);
+ intel_context_put(ce);
+ }
+
+ mutex_unlock(&ce->pin_mutex);
+ intel_context_put(ce);
+}
+
+static void intel_context_retire(struct i915_active_request *active,
+ struct i915_request *rq)
+{
+ struct intel_context *ce =
+ container_of(active, typeof(*ce), active_tracker);
+
+ intel_context_unpin(ce);
+}
+
+void
+intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!engine->cops);
+
+ kref_init(&ce->ref);
+
+ ce->gem_context = ctx;
+ ce->engine = engine;
+ ce->ops = engine->cops;
+ ce->sseu = engine->sseu;
+ ce->saturated = 0;
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+
+ mutex_init(&ce->pin_mutex);
+
+ i915_active_request_init(&ce->active_tracker,
+ NULL, intel_context_retire);
+}
+
+static void i915_global_context_shrink(void)
+{
+ kmem_cache_shrink(global.slab_ce);
+}
+
+static void i915_global_context_exit(void)
+{
+ kmem_cache_destroy(global.slab_ce);
+}
+
+static struct i915_global_context global = { {
+ .shrink = i915_global_context_shrink,
+ .exit = i915_global_context_exit,
+} };
+
+int __init i915_global_context_init(void)
+{
+ global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_ce)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
+
+void intel_context_enter_engine(struct intel_context *ce)
+{
+ intel_engine_pm_get(ce->engine);
+}
+
+void intel_context_exit_engine(struct intel_context *ce)
+{
+ ce->saturated = 0;
+ intel_engine_pm_put(ce->engine);
+}
+
+struct i915_request *intel_context_create_request(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(ce);
+ if (unlikely(err))
+ return ERR_PTR(err);
+
+ rq = i915_request_create(ce);
+ intel_context_unpin(ce);
+
+ return rq;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
new file mode 100644
index 000000000000..63392c88cd98
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -0,0 +1,130 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_H__
+#define __INTEL_CONTEXT_H__
+
+#include <linux/lockdep.h>
+
+#include "intel_context_types.h"
+#include "intel_engine_types.h"
+
+void intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+struct intel_context *
+intel_context_create(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+void intel_context_free(struct intel_context *ce);
+
+/**
+ * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
+ * @ce: the context
+ *
+ * Acquire a lock on the pinned status of the HW context, such that the context
+ * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
+ * intel_context_is_pinned() remains stable.
+ */
+static inline int intel_context_lock_pinned(struct intel_context *ce)
+ __acquires(ce->pin_mutex)
+{
+ return mutex_lock_interruptible(&ce->pin_mutex);
+}
+
+/**
+ * intel_context_is_pinned - Reports the 'pinned' status
+ * @ce: the context
+ *
+ * While in use by the GPU, the context, along with its ring and page
+ * tables, is pinned into memory and the GTT.
+ *
+ * Returns: true if the context is currently pinned for use by the GPU.
+ */
+static inline bool
+intel_context_is_pinned(struct intel_context *ce)
+{
+ return atomic_read(&ce->pin_count);
+}
+
+/**
+ * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
+ * @ce: the context
+ *
+ * Releases the lock earlier acquired by intel_context_lock_pinned().
+ */
+static inline void intel_context_unlock_pinned(struct intel_context *ce)
+ __releases(ce->pin_mutex)
+{
+ mutex_unlock(&ce->pin_mutex);
+}
+
+int __intel_context_do_pin(struct intel_context *ce);
+
+static inline int intel_context_pin(struct intel_context *ce)
+{
+ if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ return 0;
+
+ return __intel_context_do_pin(ce);
+}
+
+static inline void __intel_context_pin(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+ atomic_inc(&ce->pin_count);
+}
+
+void intel_context_unpin(struct intel_context *ce);
+
+void intel_context_enter_engine(struct intel_context *ce);
+void intel_context_exit_engine(struct intel_context *ce);
+
+static inline void intel_context_enter(struct intel_context *ce)
+{
+ if (!ce->active_count++)
+ ce->ops->enter(ce);
+}
+
+static inline void intel_context_mark_active(struct intel_context *ce)
+{
+ ++ce->active_count;
+}
+
+static inline void intel_context_exit(struct intel_context *ce)
+{
+ GEM_BUG_ON(!ce->active_count);
+ if (!--ce->active_count)
+ ce->ops->exit(ce);
+}
+
+static inline struct intel_context *intel_context_get(struct intel_context *ce)
+{
+ kref_get(&ce->ref);
+ return ce;
+}
+
+static inline void intel_context_put(struct intel_context *ce)
+{
+ kref_put(&ce->ref, ce->ops->destroy);
+}
+
+static inline void intel_context_timeline_lock(struct intel_context *ce)
+ __acquires(&ce->ring->timeline->mutex)
+{
+ mutex_lock(&ce->ring->timeline->mutex);
+}
+
+static inline void intel_context_timeline_unlock(struct intel_context *ce)
+ __releases(&ce->ring->timeline->mutex)
+{
+ mutex_unlock(&ce->ring->timeline->mutex);
+}
+
+struct i915_request *intel_context_create_request(struct intel_context *ce);
+
+#endif /* __INTEL_CONTEXT_H__ */
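
A hedged sketch of how the lock_pinned helpers are intended to combine, so the pinned status cannot change while it is inspected (inspect_pinned is an illustrative name, not part of the patch):

static int inspect_pinned(struct intel_context *ce)
{
	int err;

	err = intel_context_lock_pinned(ce); /* interruptible */
	if (err)
		return err;

	if (intel_context_is_pinned(ce)) {
		/* ce cannot be bound or unbound while pin_mutex is held */
	}

	intel_context_unlock_pinned(ce);
	return 0;
}
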
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 339c7437fe82..963a312430e6 100644
--- a/drivers/gpu/drm/i915/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -10,11 +10,11 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/rbtree.h>
#include <linux/types.h>
#include "i915_active_types.h"
#include "intel_engine_types.h"
+#include "intel_sseu.h"
struct i915_gem_context;
struct i915_vma;
@@ -25,20 +25,13 @@ struct intel_context_ops {
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);
+ void (*enter)(struct intel_context *ce);
+ void (*exit)(struct intel_context *ce);
+
void (*reset)(struct intel_context *ce);
void (*destroy)(struct kref *kref);
};
-/*
- * Powergating configuration for a particular (context,engine).
- */
-struct intel_sseu {
- u8 slice_mask;
- u8 subslice_mask;
- u8 min_eus_per_subslice;
- u8 max_eus_per_subslice;
-};
-
struct intel_context {
struct kref ref;
@@ -46,7 +39,6 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *active;
- struct list_head active_link;
struct list_head signal_link;
struct list_head signals;
@@ -56,6 +48,8 @@ struct intel_context {
u32 *lrc_reg_state;
u64 lrc_desc;
+ unsigned int active_count; /* notionally protected by timeline->mutex */
+
atomic_t pin_count;
struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
@@ -68,7 +62,6 @@ struct intel_context {
struct i915_active_request active_tracker;
const struct intel_context_ops *ops;
- struct rb_node node;
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 72c7c337ace9..9359b3a7ad9c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -106,24 +106,6 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
-static inline bool __execlists_need_preempt(int prio, int last)
-{
- /*
- * Allow preemption of low -> normal -> high, but we do
- * not allow low priority tasks to preempt other low priority
- * tasks under the impression that latency for low priority
- * tasks does not matter (as much as background throughput),
- * so kiss.
- *
- * More naturally we would write
- * prio >= max(0, last);
- * except that we wish to prevent triggering preemption at the same
- * priority level: the task that is running should remain running
- * to preserve FIFO ordering of dependencies.
- */
- return prio > max(I915_PRIORITY_NORMAL - 1, last);
-}
-
static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
unsigned int bit)
@@ -233,8 +215,6 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
*/
#define I915_GEM_HWS_PREEMPT 0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32))
-#define I915_GEM_HWS_HANGCHECK 0x34
-#define I915_GEM_HWS_HANGCHECK_ADDR (I915_GEM_HWS_HANGCHECK * sizeof(u32))
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
@@ -362,14 +342,16 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
return (head - tail - CACHELINE_BYTES) & (size - 1);
}
-int intel_engine_setup_common(struct intel_engine_cs *engine);
+int intel_engines_init_mmio(struct drm_i915_private *i915);
+int intel_engines_setup(struct drm_i915_private *i915);
+int intel_engines_init(struct drm_i915_private *i915);
+void intel_engines_cleanup(struct drm_i915_private *i915);
+
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
-int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
-int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
-int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
+int intel_ring_submission_setup(struct intel_engine_cs *engine);
+int intel_ring_submission_init(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
@@ -382,6 +364,8 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
+void intel_engine_init_execlists(struct intel_engine_cs *engine);
+
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
@@ -458,19 +442,14 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
{
if (engine->reset.reset)
engine->reset.reset(engine, stalled);
+ engine->serial++; /* contexts lost */
}
-void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
-
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
void intel_engine_lost_context(struct intel_engine_cs *engine);
-void intel_engines_park(struct drm_i915_private *i915);
-void intel_engines_unpark(struct drm_i915_private *i915);
-
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
@@ -567,17 +546,4 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
#endif
-static inline u32
-intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine)
-{
- return engine->hangcheck.next_seqno =
- next_pseudo_random32(engine->hangcheck.next_seqno);
-}
-
-static inline u32
-intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK);
-}
-
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index eea9bec04f1b..2590f5904b67 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -25,9 +25,11 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
-#include "i915_reset.h"
-#include "intel_ringbuffer.h"
+
+#include "intel_engine.h"
+#include "intel_engine_pm.h"
#include "intel_lrc.h"
+#include "intel_reset.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -48,35 +50,24 @@
struct engine_class_info {
const char *name;
- int (*init_legacy)(struct intel_engine_cs *engine);
- int (*init_execlists)(struct intel_engine_cs *engine);
-
u8 uabi_class;
};
static const struct engine_class_info intel_engine_classes[] = {
[RENDER_CLASS] = {
.name = "rcs",
- .init_execlists = logical_render_ring_init,
- .init_legacy = intel_init_render_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_RENDER,
},
[COPY_ENGINE_CLASS] = {
.name = "bcs",
- .init_execlists = logical_xcs_ring_init,
- .init_legacy = intel_init_blt_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_COPY,
},
[VIDEO_DECODE_CLASS] = {
.name = "vcs",
- .init_execlists = logical_xcs_ring_init,
- .init_legacy = intel_init_bsd_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_VIDEO,
},
[VIDEO_ENHANCEMENT_CLASS] = {
.name = "vecs",
- .init_execlists = logical_xcs_ring_init,
- .init_legacy = intel_init_vebox_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
},
};
@@ -212,6 +203,22 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
PAGE_SIZE);
case 5:
case 4:
+ /*
+ * There is a discrepancy here between the size reported
+ * by the register and the size of the context layout
+ * in the docs. Both are described as authoritative!
+ *
+ * The discrepancy is on the order of a few cachelines,
+ * but the total is under one page (4k), which is our
+ * minimum allocation anyway so it should all come
+ * out in the wash.
+ */
+ cxt_size = I915_READ(CXT_SIZE) + 1;
+ DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
+ INTEL_GEN(dev_priv),
+ cxt_size * 64,
+ cxt_size - 1);
+ return round_up(cxt_size * 64, PAGE_SIZE);
case 3:
case 2:
/* For the special day when i810 gets merged. */
@@ -312,6 +319,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->class = info->class;
engine->instance = info->instance;
+ /*
+ * To be overridden by the backend on setup. However to facilitate
+ * cleanup on error during setup, we always provide the destroy vfunc.
+ */
+ engine->destroy = (typeof(engine->destroy))kfree;
+
engine->uabi_class = intel_engine_classes[info->class].uabi_class;
engine->context_size = __intel_engine_context_size(dev_priv,
@@ -336,18 +349,70 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
return 0;
}
+static void __setup_engine_capabilities(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (engine->class == VIDEO_DECODE_CLASS) {
+ /*
+ * HEVC support is present on first engine instance
+ * before Gen11 and on all instances afterwards.
+ */
+ if (INTEL_GEN(i915) >= 11 ||
+ (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+ engine->uabi_capabilities |=
+ I915_VIDEO_CLASS_CAPABILITY_HEVC;
+
+ /*
+ * SFC block is present only on even logical engine
+ * instances.
+ */
+ if ((INTEL_GEN(i915) >= 11 &&
+ RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
+ (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+ engine->uabi_capabilities |=
+ I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
+ } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
+ if (INTEL_GEN(i915) >= 9)
+ engine->uabi_capabilities |=
+ I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
+ }
+}
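
Concretely: on a Gen9 part only vcs0 advertises HEVC and SFC; on Gen11+ every vcs instance advertises HEVC, while SFC is advertised only for the instances whose bit is set in vdbox_sfc_access. Every Gen9+ vecs instance advertises SFC.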
+
+static void intel_setup_engine_capabilities(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ __setup_engine_capabilities(engine);
+}
+
+/**
+ * intel_engines_cleanup() - free the resources allocated for Command Streamers
+ * @i915: the i915 device
+ */
+void intel_engines_cleanup(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ engine->destroy(engine);
+ i915->engine[id] = NULL;
+ }
+}
+
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
- * @dev_priv: i915 device private
+ * @i915: the i915 device
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
+int intel_engines_init_mmio(struct drm_i915_private *i915)
{
- struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
- const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct intel_device_info *device_info = mkwrite_device_info(i915);
+ const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
unsigned int mask = 0;
unsigned int i;
int err;
@@ -360,10 +425,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
- if (!HAS_ENGINE(dev_priv, i))
+ if (!HAS_ENGINE(i915, i))
continue;
- err = intel_engine_setup(dev_priv, i);
+ err = intel_engine_setup(i915, i);
if (err)
goto cleanup;
@@ -379,69 +444,52 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
device_info->engine_mask = mask;
/* We always presume we have at least RCS available for later probing */
- if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) {
+ if (WARN_ON(!HAS_ENGINE(i915, RCS0))) {
err = -ENODEV;
goto cleanup;
}
- RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask);
+ RUNTIME_INFO(i915)->num_engines = hweight32(mask);
- i915_check_and_clear_faults(dev_priv);
+ i915_check_and_clear_faults(i915);
+
+ intel_setup_engine_capabilities(i915);
return 0;
cleanup:
- for_each_engine(engine, dev_priv, id)
- kfree(engine);
+ intel_engines_cleanup(i915);
return err;
}
/**
* intel_engines_init() - init the Engine Command Streamers
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init(struct drm_i915_private *dev_priv)
+int intel_engines_init(struct drm_i915_private *i915)
{
+ int (*init)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
- enum intel_engine_id id, err_id;
+ enum intel_engine_id id;
int err;
- for_each_engine(engine, dev_priv, id) {
- const struct engine_class_info *class_info =
- &intel_engine_classes[engine->class];
- int (*init)(struct intel_engine_cs *engine);
-
- if (HAS_EXECLISTS(dev_priv))
- init = class_info->init_execlists;
- else
- init = class_info->init_legacy;
-
- err = -EINVAL;
- err_id = id;
-
- if (GEM_DEBUG_WARN_ON(!init))
- goto cleanup;
+ if (HAS_EXECLISTS(i915))
+ init = intel_execlists_submission_init;
+ else
+ init = intel_ring_submission_init;
+ for_each_engine(engine, i915, id) {
err = init(engine);
if (err)
goto cleanup;
-
- GEM_BUG_ON(!engine->submit_request);
}
return 0;
cleanup:
- for_each_engine(engine, dev_priv, id) {
- if (id >= err_id) {
- kfree(engine);
- dev_priv->engine[id] = NULL;
- } else {
- dev_priv->gt.cleanup_engine(engine);
- }
- }
+ intel_engines_cleanup(i915);
return err;
}
@@ -450,7 +498,7 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
i915_gem_batch_pool_init(&engine->batch_pool, engine);
}
-static void intel_engine_init_execlist(struct intel_engine_cs *engine)
+void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -557,16 +605,7 @@ err:
return ret;
}
-/**
- * intel_engines_setup_common - setup engine state not requiring hw access
- * @engine: Engine to setup.
- *
- * Initializes @engine@ structure members shared between legacy and execlists
- * submission modes which do not require hardware access.
- *
- * Typically done early in the submission mode specific engine setup stage.
- */
-int intel_engine_setup_common(struct intel_engine_cs *engine)
+static int intel_engine_setup_common(struct intel_engine_cs *engine)
{
int err;
@@ -583,10 +622,15 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
intel_engine_init_breadcrumbs(engine);
- intel_engine_init_execlist(engine);
+ intel_engine_init_execlists(engine);
intel_engine_init_hangcheck(engine);
intel_engine_init_batch_pool(engine);
intel_engine_init_cmd_parser(engine);
+ intel_engine_init__pm(engine);
+
+ /* Use the whole device by default */
+ engine->sseu =
+ intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
return 0;
@@ -595,6 +639,49 @@ err_hwsp:
return err;
}
+/**
+ * intel_engines_setup() - setup engine state not requiring hw access
+ * @i915: Device to setup.
+ *
+ * Initializes engine structure members shared between legacy and execlists
+ * submission modes which do not require hardware access.
+ *
+ * Typically done early in the submission mode specific engine setup stage.
+ */
+int intel_engines_setup(struct drm_i915_private *i915)
+{
+ int (*setup)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
+
+ if (HAS_EXECLISTS(i915))
+ setup = intel_execlists_submission_setup;
+ else
+ setup = intel_ring_submission_setup;
+
+ for_each_engine(engine, i915, id) {
+ err = intel_engine_setup_common(engine);
+ if (err)
+ goto cleanup;
+
+ err = setup(engine);
+ if (err)
+ goto cleanup;
+
+ /* We expect the backend to take control over its state */
+ GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree);
+
+ GEM_BUG_ON(!engine->cops);
+ }
+
+ return 0;
+
+cleanup:
+ intel_engines_cleanup(i915);
+ return err;
+}
+
void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
{
static const struct {
@@ -675,6 +762,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
goto out_timeline;
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+ GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
i915_timeline_unpin(&frame->timeline);
@@ -690,11 +778,17 @@ static int pin_context(struct i915_gem_context *ctx,
struct intel_context **out)
{
struct intel_context *ce;
+ int err;
- ce = intel_context_pin(ctx, engine);
+ ce = i915_gem_context_get_engine(ctx, engine->id);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ err = intel_context_pin(ce);
+ intel_context_put(ce);
+ if (err)
+ return err;
+
*out = ce;
return 0;
}
@@ -753,30 +847,6 @@ err_unpin:
return ret;
}
-void intel_gt_resume(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * After resume, we may need to poke into the pinned kernel
- * contexts to paper over any damage caused by the sudden suspend.
- * Only the kernel contexts should remain pinned over suspend,
- * allowing us to fixup the user contexts on their first pin.
- */
- for_each_engine(engine, i915, id) {
- struct intel_context *ce;
-
- ce = engine->kernel_context;
- if (ce)
- ce->ops->reset(ce);
-
- ce = engine->preempt_context;
- if (ce)
- ce->ops->reset(ce);
- }
-}
-
/**
* intel_engines_cleanup_common - cleans up the engine state created by
* the common initializers.
@@ -1062,10 +1132,15 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (i915_reset_failed(engine->i915))
return true;
+ if (!intel_wakeref_active(&engine->wakeref))
+ return true;
+
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
+ synchronize_hardirq(engine->i915->drm.irq);
+
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
@@ -1123,117 +1198,6 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
engine->set_default_submission(engine);
}
-static bool reset_engines(struct drm_i915_private *i915)
-{
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
- return false;
-
- return intel_gpu_reset(i915, ALL_ENGINES) == 0;
-}
-
-/**
- * intel_engines_sanitize: called after the GPU has lost power
- * @i915: the i915 device
- * @force: ignore a failed reset and sanitize engine state anyway
- *
- * Anytime we reset the GPU, either with an explicit GPU reset or through a
- * PCI power cycle, the GPU loses state and we must reset our state tracking
- * to match. Note that calling intel_engines_sanitize() if the GPU has not
- * been reset results in much confusion!
- */
-void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- GEM_TRACE("\n");
-
- if (!reset_engines(i915) && !force)
- return;
-
- for_each_engine(engine, i915, id)
- intel_engine_reset(engine, false);
-}
-
-/**
- * intel_engines_park: called when the GT is transitioning from busy->idle
- * @i915: the i915 device
- *
- * The GT is now idle and about to go to sleep (maybe never to wake again?).
- * Time for us to tidy and put away our toys (release resources back to the
- * system).
- */
-void intel_engines_park(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id) {
- /* Flush the residual irq tasklets first. */
- intel_engine_disarm_breadcrumbs(engine);
- tasklet_kill(&engine->execlists.tasklet);
-
- /*
- * We are committed now to parking the engines, make sure there
- * will be no more interrupts arriving later and the engines
- * are truly idle.
- */
- if (wait_for(intel_engine_is_idle(engine), 10)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- dev_err(i915->drm.dev,
- "%s is not idle before parking\n",
- engine->name);
- intel_engine_dump(engine, &p, NULL);
- }
-
- /* Must be reset upon idling, or we may miss the busy wakeup. */
- GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
-
- if (engine->park)
- engine->park(engine);
-
- if (engine->pinned_default_state) {
- i915_gem_object_unpin_map(engine->default_state);
- engine->pinned_default_state = NULL;
- }
-
- i915_gem_batch_pool_fini(&engine->batch_pool);
- engine->execlists.no_priolist = false;
- }
-
- i915->gt.active_engines = 0;
-}
-
-/**
- * intel_engines_unpark: called when the GT is transitioning from idle->busy
- * @i915: the i915 device
- *
- * The GT was idle and now about to fire up with some new user requests.
- */
-void intel_engines_unpark(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id) {
- void *map;
-
- /* Pin the default state for fast resets from atomic context. */
- map = NULL;
- if (engine->default_state)
- map = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_WB);
- if (!IS_ERR_OR_NULL(map))
- engine->pinned_default_state = map;
-
- if (engine->unpark)
- engine->unpark(engine);
-
- intel_engine_init_hangcheck(engine);
- }
-}
-
/**
* intel_engine_lost_context: called when the GPU is reset into unknown state
* @engine: the engine
@@ -1312,8 +1276,11 @@ static void print_request(struct drm_printer *m,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &rq->fence.flags) ? "+" :
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &rq->fence.flags) ? "+" : "",
+ &rq->fence.flags) ? "-" :
+ "",
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
name);
@@ -1518,9 +1485,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (i915_reset_failed(engine->i915))
drm_printf(m, "*** WEDGED ***\n");
- drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
- engine->hangcheck.last_seqno,
- engine->hangcheck.next_seqno,
+ drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
+ drm_printf(m, "\tHangcheck: %d ms ago\n",
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
@@ -1752,6 +1718,5 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_engine.c"
-#include "selftests/intel_engine_cs.c"
+#include "selftest_engine_cs.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
new file mode 100644
index 000000000000..ccf034764741
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -0,0 +1,164 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "intel_engine.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_pm.h"
+
+static int __engine_unpark(struct intel_wakeref *wf)
+{
+ struct intel_engine_cs *engine =
+ container_of(wf, typeof(*engine), wakeref);
+ void *map;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ intel_gt_pm_get(engine->i915);
+
+ /* Pin the default state for fast resets from atomic context. */
+ map = NULL;
+ if (engine->default_state)
+ map = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (!IS_ERR_OR_NULL(map))
+ engine->pinned_default_state = map;
+
+ if (engine->unpark)
+ engine->unpark(engine);
+
+ intel_engine_init_hangcheck(engine);
+ return 0;
+}
+
+void intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+ intel_wakeref_get(engine->i915, &engine->wakeref, __engine_unpark);
+}
+
+void intel_engine_park(struct intel_engine_cs *engine)
+{
+ /*
+ * We are committed now to parking this engine, so make sure there
+ * will be no more interrupts arriving later and the engine
+ * is truly idle.
+ */
+ if (wait_for(intel_engine_is_idle(engine), 10)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ dev_err(engine->i915->drm.dev,
+ "%s is not idle before parking\n",
+ engine->name);
+ intel_engine_dump(engine, &p, NULL);
+ }
+}
+
+static bool switch_to_kernel_context(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ /* Already inside the kernel context, safe to power down. */
+ if (engine->wakeref_serial == engine->serial)
+ return true;
+
+ /* GPU is pointing to the void, as good as in the kernel context. */
+ if (i915_reset_failed(engine->i915))
+ return true;
+
+ /*
+ * Note, we do this without taking the timeline->mutex. We cannot
+ * as we may be called while retiring the kernel context and so
+ * already underneath the timeline->mutex. Instead we rely on the
+ * exclusive property of __engine_park() that prevents anyone
+ * else from creating a request on this engine. This also requires
+ * that the ring is empty and we avoid any waits while constructing
+ * the context, as they assume protection by the timeline->mutex.
+ * This should hold true as we can only park the engine after
+ * retiring the last request, thus all rings should be empty and
+ * all timelines idle.
+ */
+ rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
+ if (IS_ERR(rq))
+ /* Context switch failed, hope for the best! Maybe reset? */
+ return true;
+
+ /* Check again on the next retirement. */
+ engine->wakeref_serial = engine->serial + 1;
+ __i915_request_commit(rq);
+
+ return false;
+}
+
+static int __engine_park(struct intel_wakeref *wf)
+{
+ struct intel_engine_cs *engine =
+ container_of(wf, typeof(*engine), wakeref);
+
+ /*
+ * If one and only one request is completed between pm events,
+ * we know that we are inside the kernel context and it is
+ * safe to power down. (We are paranoid in case that runtime
+ * suspend causes corruption to the active context image, and
+ * want to avoid that impacting userspace.)
+ */
+ if (!switch_to_kernel_context(engine))
+ return -EBUSY;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ intel_engine_disarm_breadcrumbs(engine);
+
+ /* Must be reset upon idling, or we may miss the busy wakeup. */
+ GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
+
+ if (engine->park)
+ engine->park(engine);
+
+ if (engine->pinned_default_state) {
+ i915_gem_object_unpin_map(engine->default_state);
+ engine->pinned_default_state = NULL;
+ }
+
+ engine->execlists.no_priolist = false;
+
+ intel_gt_pm_put(engine->i915);
+ return 0;
+}
+
+void intel_engine_pm_put(struct intel_engine_cs *engine)
+{
+ intel_wakeref_put(engine->i915, &engine->wakeref, __engine_park);
+}
+
+void intel_engine_init__pm(struct intel_engine_cs *engine)
+{
+ intel_wakeref_init(&engine->wakeref);
+}
+
+int intel_engines_resume(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ intel_gt_pm_get(i915);
+ for_each_engine(engine, i915, id) {
+ intel_engine_pm_get(engine);
+ engine->serial++; /* kernel context lost */
+ err = engine->resume(engine);
+ intel_engine_pm_put(engine);
+ if (err) {
+ dev_err(i915->drm.dev,
+ "Failed to restart %s (%d)\n",
+ engine->name, err);
+ break;
+ }
+ }
+ intel_gt_pm_put(i915);
+
+ return err;
+}
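
The two new entry points wrap the wakeref state machine: callers never invoke __engine_unpark()/__engine_park() directly, they only bracket activity with get/put. A hedged usage sketch:

/* Illustrative only: keep the engine awake across a short HW access. */
intel_engine_pm_get(engine);	/* __engine_unpark() fires on the 0 -> 1 transition */
/* ... touch engine registers, submit to the kernel context, ... */
intel_engine_pm_put(engine);	/* __engine_park() may fire on 1 -> 0 */
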
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
new file mode 100644
index 000000000000..b326cd993d60
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_PM_H
+#define INTEL_ENGINE_PM_H
+
+struct drm_i915_private;
+struct intel_engine_cs;
+
+void intel_engine_pm_get(struct intel_engine_cs *engine);
+void intel_engine_pm_put(struct intel_engine_cs *engine);
+
+void intel_engine_park(struct intel_engine_cs *engine);
+
+void intel_engine_init__pm(struct intel_engine_cs *engine);
+
+int intel_engines_resume(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 1f970c76b6a6..40e774acc2cd 100644
--- a/drivers/gpu/drm/i915/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -14,14 +14,15 @@
#include <linux/types.h>
#include "i915_gem.h"
+#include "i915_gem_batch_pool.h"
+#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "i915_timeline_types.h"
+#include "intel_sseu.h"
+#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
-#include "i915_gem_batch_pool.h"
-#include "i915_pmu.h"
-
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8
@@ -52,8 +53,8 @@ struct intel_instdone {
struct intel_engine_hangcheck {
u64 acthd;
- u32 last_seqno;
- u32 next_seqno;
+ u32 last_ring;
+ u32 last_head;
unsigned long action_timestamp;
struct intel_instdone instdone;
};
@@ -226,6 +227,7 @@ struct intel_engine_execlists {
* @queue: queue of requests, in priority lists
*/
struct rb_root_cached queue;
+ struct rb_root_cached virtual;
/**
* @csb_write: control register for Context Switch buffer
@@ -278,6 +280,10 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
+ u32 uabi_capabilities;
+
+ struct intel_sseu sseu;
+
struct intel_ring *buffer;
struct i915_timeline timeline;
@@ -285,6 +291,10 @@ struct intel_engine_cs {
struct intel_context *kernel_context; /* pinned */
struct intel_context *preempt_context; /* pinned; optional */
+ unsigned long serial;
+
+ unsigned long wakeref_serial;
+ struct intel_wakeref wakeref;
struct drm_i915_gem_object *default_state;
void *pinned_default_state;
@@ -357,7 +367,7 @@ struct intel_engine_cs {
void (*irq_enable)(struct intel_engine_cs *engine);
void (*irq_disable)(struct intel_engine_cs *engine);
- int (*init_hw)(struct intel_engine_cs *engine);
+ int (*resume)(struct intel_engine_cs *engine);
struct {
void (*prepare)(struct intel_engine_cs *engine);
@@ -398,6 +408,13 @@ struct intel_engine_cs {
void (*submit_request)(struct i915_request *rq);
/*
+ * Called on signaling of a SUBMIT_FENCE, passing along the signaling
+ * request down to the bonded pairs.
+ */
+ void (*bond_execute)(struct i915_request *rq,
+ struct dma_fence *signal);
+
+ /*
* Call when the priority on a request has changed and it and its
* dependencies may need rescheduling. Note the request itself may
* not be ready to run!
@@ -413,7 +430,7 @@ struct intel_engine_cs {
*/
void (*cancel_requests)(struct intel_engine_cs *engine);
- void (*cleanup)(struct intel_engine_cs *engine);
+ void (*destroy)(struct intel_engine_cs *engine);
struct intel_engine_execlists execlists;
@@ -438,6 +455,7 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
+#define I915_ENGINE_IS_VIRTUAL BIT(5)
unsigned int flags;
/*
@@ -527,6 +545,12 @@ intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
+static inline bool
+intel_engine_is_virtual(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_IS_VIRTUAL;
+}
+
#define instdone_slice_mask(dev_priv__) \
(IS_GEN(dev_priv__, 7) ? \
1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index a34ece53a771..a34ece53a771 100644
--- a/drivers/gpu/drm/i915/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
new file mode 100644
index 000000000000..ae7155f0e063
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -0,0 +1,143 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt_pm.h"
+#include "intel_pm.h"
+#include "intel_wakeref.h"
+
+static void pm_notify(struct drm_i915_private *i915, int state)
+{
+ blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
+}
+
+static int intel_gt_unpark(struct intel_wakeref *wf)
+{
+ struct drm_i915_private *i915 =
+ container_of(wf, typeof(*i915), gt.wakeref);
+
+ GEM_TRACE("\n");
+
+ /*
+ * It seems that the DMC likes to transition between the DC states a lot
+ * when there are no connected displays (no active power domains) during
+ * command submission.
+ *
+ * This activity has a negative impact on the performance of the chip, with
+ * huge latencies observed in the interrupt handler and elsewhere.
+ *
+ * Work around it by grabbing a GT IRQ power domain whilst there is any
+ * GT activity, preventing any DC state transitions.
+ */
+ i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ GEM_BUG_ON(!i915->gt.awake);
+
+ intel_enable_gt_powersave(i915);
+
+ i915_update_gfx_val(i915);
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_busy(i915);
+
+ i915_pmu_gt_unparked(i915);
+
+ i915_queue_hangcheck(i915);
+
+ pm_notify(i915, INTEL_GT_UNPARK);
+
+ return 0;
+}
+
+void intel_gt_pm_get(struct drm_i915_private *i915)
+{
+ intel_wakeref_get(i915, &i915->gt.wakeref, intel_gt_unpark);
+}
+
+static int intel_gt_park(struct intel_wakeref *wf)
+{
+ struct drm_i915_private *i915 =
+ container_of(wf, typeof(*i915), gt.wakeref);
+ intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);
+
+ GEM_TRACE("\n");
+
+ pm_notify(i915, INTEL_GT_PARK);
+
+ i915_pmu_gt_parked(i915);
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_idle(i915);
+
+ GEM_BUG_ON(!wakeref);
+ intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+
+ return 0;
+}
+
+void intel_gt_pm_put(struct drm_i915_private *i915)
+{
+ intel_wakeref_put(i915, &i915->gt.wakeref, intel_gt_park);
+}
+
+void intel_gt_pm_init(struct drm_i915_private *i915)
+{
+ intel_wakeref_init(&i915->gt.wakeref);
+ BLOCKING_INIT_NOTIFIER_HEAD(&i915->gt.pm_notifications);
+}
+
+static bool reset_engines(struct drm_i915_private *i915)
+{
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ return false;
+
+ return intel_gpu_reset(i915, ALL_ENGINES) == 0;
+}
+
+/**
+ * intel_gt_sanitize: called after the GPU has lost power
+ * @i915: the i915 device
+ * @force: ignore a failed reset and sanitize engine state anyway
+ *
+ * Anytime we reset the GPU, either with an explicit GPU reset or through a
+ * PCI power cycle, the GPU loses state and we must reset our state tracking
+ * to match. Note that calling intel_gt_sanitize() if the GPU has not
+ * been reset results in much confusion!
+ */
+void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ GEM_TRACE("\n");
+
+ if (!reset_engines(i915) && !force)
+ return;
+
+ for_each_engine(engine, i915, id)
+ intel_engine_reset(engine, false);
+}
+
+void intel_gt_resume(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * After resume, we may need to poke into the pinned kernel
+ * contexts to paper over any damage caused by the sudden suspend.
+ * Only the kernel contexts should remain pinned over suspend,
+ * allowing us to fixup the user contexts on their first pin.
+ */
+ for_each_engine(engine, i915, id) {
+ struct intel_context *ce;
+
+ ce = engine->kernel_context;
+ if (ce)
+ ce->ops->reset(ce);
+
+ ce = engine->preempt_context;
+ if (ce)
+ ce->ops->reset(ce);
+ }
+}
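
gt.pm_notifications is an ordinary blocking notifier chain, so other code can observe park/unpark transitions. A hedged sketch of a subscriber, using only the generic notifier API (nothing below is added by this patch):

static int gt_pm_notifier(struct notifier_block *nb,
			  unsigned long state, void *data)
{
	switch (state) {
	case INTEL_GT_UNPARK:
		/* e.g. start sampling: data is the i915 device */
		break;
	case INTEL_GT_PARK:
		/* e.g. quiesce before the GT powers down */
		break;
	}

	return NOTIFY_OK;
}

/* blocking_notifier_chain_register(&i915->gt.pm_notifications, &nb); */
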
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
new file mode 100644
index 000000000000..7dd1130a19a4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_H
+#define INTEL_GT_PM_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+enum {
+ INTEL_GT_UNPARK,
+ INTEL_GT_PARK,
+};
+
+void intel_gt_pm_get(struct drm_i915_private *i915);
+void intel_gt_pm_put(struct drm_i915_private *i915);
+
+void intel_gt_pm_init(struct drm_i915_private *i915);
+
+void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
+void intel_gt_resume(struct drm_i915_private *i915);
+
+#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
index 3d51ed1428d4..3a4d09b80fa0 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
@@ -22,12 +22,13 @@
*
*/
+#include "intel_reset.h"
#include "i915_drv.h"
-#include "i915_reset.h"
struct hangcheck {
u64 acthd;
- u32 seqno;
+ u32 ring;
+ u32 head;
enum intel_engine_hangcheck_action action;
unsigned long action_timestamp;
int deadlock;
@@ -133,26 +134,31 @@ static void hangcheck_load_sample(struct intel_engine_cs *engine,
struct hangcheck *hc)
{
hc->acthd = intel_engine_get_active_head(engine);
- hc->seqno = intel_engine_get_hangcheck_seqno(engine);
+ hc->ring = ENGINE_READ(engine, RING_START);
+ hc->head = ENGINE_READ(engine, RING_HEAD);
}
static void hangcheck_store_sample(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
engine->hangcheck.acthd = hc->acthd;
- engine->hangcheck.last_seqno = hc->seqno;
+ engine->hangcheck.last_ring = hc->ring;
+ engine->hangcheck.last_head = hc->head;
}
static enum intel_engine_hangcheck_action
hangcheck_get_action(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
- if (engine->hangcheck.last_seqno != hc->seqno)
- return ENGINE_ACTIVE_SEQNO;
-
if (intel_engine_is_idle(engine))
return ENGINE_IDLE;
+ if (engine->hangcheck.last_ring != hc->ring)
+ return ENGINE_ACTIVE_SEQNO;
+
+ if (engine->hangcheck.last_head != hc->head)
+ return ENGINE_ACTIVE_SEQNO;
+
return engine_stuck(engine, hc->acthd);
}
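
With the per-engine hangcheck seqno removed, progress is now inferred purely from ring state: if either the ring base (RING_START) or the ring head (RING_HEAD) moved between samples, the engine is deemed active. An illustrative restatement of the checks above (made_progress is a hypothetical helper, not in the patch):

static bool made_progress(const struct intel_engine_hangcheck *prev,
			  const struct hangcheck *hc)
{
	return prev->last_ring != hc->ring || prev->last_head != hc->head;
}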
@@ -256,6 +262,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int hung = 0, stuck = 0, wedged = 0;
+ intel_wakeref_t wakeref;
if (!i915_modparams.enable_hangcheck)
return;
@@ -266,6 +273,10 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (i915_terminally_wedged(dev_priv))
return;
+ wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ if (!wakeref)
+ return;
+
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
@@ -313,6 +324,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (hung)
hangcheck_declare_hang(dev_priv, hung, stuck);
+ intel_runtime_pm_put(dev_priv, wakeref);
+
/* Reset timer in case GPU hangs without another request being added */
i915_queue_hangcheck(dev_priv);
}
@@ -330,5 +343,5 @@ void intel_hangcheck_init(struct drm_i915_private *i915)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_hangcheck.c"
+#include "selftest_hangcheck.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 11e5a86610bf..1f7bee0cae0c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -133,13 +133,13 @@
*/
#include <linux/interrupt.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_render_state.h"
-#include "i915_reset.h"
#include "i915_vgpu.h"
+#include "intel_engine_pm.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
+#include "intel_reset.h"
#include "intel_workarounds.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
@@ -164,7 +164,53 @@
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-#define ACTIVE_PRIORITY (I915_PRIORITY_NOSEMAPHORE)
+struct virtual_engine {
+ struct intel_engine_cs base;
+ struct intel_context context;
+
+ /*
+ * We allow only a single request through the virtual engine at a time
+ * (each request in the timeline waits for the completion fence of
+ * the previous before being submitted). By restricting ourselves to
+ * only submitting a single request, each request is placed on to a
+ * physical to maximise load spreading (by virtue of the late greedy
+ * scheduling -- each real engine takes the next available request
+ * upon idling).
+ */
+ struct i915_request *request;
+
+ /*
+ * We keep an rbtree of available virtual engines inside each physical
+ * engine, sorted by priority. Here we preallocate the nodes we need
+ * for the virtual engine, indexed by physical_engine->id.
+ */
+ struct ve_node {
+ struct rb_node rb;
+ int prio;
+ } nodes[I915_NUM_ENGINES];
+
+ /*
+ * Keep track of bonded pairs -- restrictions upon our selection
+ * of physical engines any particular request may be submitted to.
+ * If we receive a submit-fence from a master engine, we will only
+ * use one of the sibling_mask physical engines.
+ */
+ struct ve_bond {
+ const struct intel_engine_cs *master;
+ intel_engine_mask_t sibling_mask;
+ } *bonds;
+ unsigned int num_bonds;
+
+ /* And finally, which physical engines this virtual engine maps onto. */
+ unsigned int num_siblings;
+ struct intel_engine_cs *siblings[0];
+};
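
siblings[] is a trailing array, so each virtual engine is a single allocation sized for its sibling count. A hedged sketch of the allocation (struct_size() is the generic helper from linux/overflow.h, and count stands in for the number of siblings; the real constructor lands later in the series):

struct virtual_engine *ve;

ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
if (!ve)
	return ERR_PTR(-ENOMEM);
ve->num_siblings = count;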
+
+static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!intel_engine_is_virtual(engine));
+ return container_of(engine, struct virtual_engine, base);
+}
static int execlists_context_deferred_alloc(struct intel_context *ce,
struct intel_engine_cs *engine);
@@ -189,23 +235,12 @@ static int effective_prio(const struct i915_request *rq)
/*
* On unwinding the active request, we give it a priority bump
- * equivalent to a freshly submitted request. This protects it from
- * being gazumped again, but it would be preferable if we didn't
- * let it be gazumped in the first place!
- *
- * See __unwind_incomplete_requests()
+ * if it has completed waiting on any semaphore. If we know that
+ * the request has already started, we can prevent an unwanted
+ * preempt-to-idle cycle by taking that into account now.
*/
- if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
- /*
- * After preemption, we insert the active request at the
- * end of the new priority level. This means that we will be
- * _lower_ priority than the preemptee all things equal (and
- * so the preemption is valid), so adjust our comparison
- * accordingly.
- */
- prio |= ACTIVE_PRIORITY;
- prio--;
- }
+ if (__i915_request_has_started(rq))
+ prio |= I915_PRIORITY_NOSEMAPHORE;
/* Restrict mere WAIT boosts from triggering preemption */
return prio | __NO_PREEMPTION;
@@ -229,7 +264,8 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
}
static inline bool need_preempt(const struct intel_engine_cs *engine,
- const struct i915_request *rq)
+ const struct i915_request *rq,
+ struct rb_node *rb)
{
int last_prio;
@@ -252,8 +288,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* ourselves, ignore the request.
*/
last_prio = effective_prio(rq);
- if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
- last_prio))
+ if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
+ last_prio))
return false;
/*
@@ -264,6 +300,25 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
rq_prio(list_next_entry(rq, link)) > last_prio)
return true;
+ if (rb) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ bool preempt = false;
+
+ if (engine == ve->siblings[0]) { /* only preempt one sibling */
+ struct i915_request *next;
+
+ rcu_read_lock();
+ next = READ_ONCE(ve->request);
+ if (next)
+ preempt = rq_prio(next) > last_prio;
+ rcu_read_unlock();
+ }
+
+ if (preempt)
+ return preempt;
+ }
+
/*
* If the inflight context did not trigger the preemption, then maybe
* it was the set of queued requests? Pick the highest priority in
@@ -375,13 +430,15 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
struct list_head *uninitialized_var(pl);
- int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
+ int prio = I915_PRIORITY_INVALID;
lockdep_assert_held(&engine->timeline.lock);
list_for_each_entry_safe_reverse(rq, rn,
&engine->timeline.requests,
link) {
+ struct intel_engine_cs *owner;
+
if (i915_request_completed(rq))
break;
@@ -390,40 +447,29 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(rq->hw_context->active);
- GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
- if (rq_prio(rq) != prio) {
- prio = rq_prio(rq);
- pl = i915_sched_lookup_priolist(engine, prio);
- }
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-
- list_add(&rq->sched.link, pl);
-
- active = rq;
- }
+ /*
+ * Push the request back into the queue for later resubmission.
+ * If this request is not native to this physical engine (i.e.
+ * it came from a virtual source), push it back onto the virtual
+ * engine so that it can be moved across onto another physical
+ * engine as load dictates.
+ */
+ owner = rq->hw_context->engine;
+ if (likely(owner == engine)) {
+ GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine, prio);
+ }
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- /*
- * The active request is now effectively the start of a new client
- * stream, so give it the equivalent small priority bump to prevent
- * it being gazumped a second time by another peer.
- *
- * Note we have to be careful not to apply a priority boost to a request
- * still spinning on its semaphores. If the request hasn't started, that
- * means it is still waiting for its dependencies to be signaled, and
- * if we apply a priority boost to this request, we will boost it past
- * its signalers and so break PI.
- *
- * One consequence of this preemption boost is that we may jump
- * over lesser priorities (such as I915_PRIORITY_WAIT), effectively
- * making those priorities non-preemptible. They will be moved forward
- * in the priority queue, but they will not gain immediate access to
- * the GPU.
- */
- if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) {
- prio |= ACTIVE_PRIORITY;
- active->sched.attr.priority = prio;
- list_move_tail(&active->sched.link,
- i915_sched_lookup_priolist(engine, prio));
+ list_add(&rq->sched.link, pl);
+ active = rq;
+ } else {
+ rq->engine = owner;
+ owner->submit_request(rq);
+ active = NULL;
+ }
}
return active;
@@ -475,6 +521,15 @@ execlists_context_schedule_in(struct i915_request *rq)
rq->hw_context->active = rq->engine;
}
+static void kick_siblings(struct i915_request *rq)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->hw_context->engine);
+ struct i915_request *next = READ_ONCE(ve->request);
+
+ if (next && next->execution_mask & ~rq->execution_mask)
+ tasklet_schedule(&ve->base.execlists.tasklet);
+}
+
static inline void
execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
{
@@ -482,6 +537,18 @@ execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, status);
trace_i915_request_out(rq);
+
+ /*
+ * If this is part of a virtual engine, its next request may have
+ * been blocked waiting for access to the active context. We have
+ * to kick all the siblings again in case we need to switch (e.g.
+ * the next request is not runnable on this engine). Hopefully,
+ * we will already have submitted the next request before the
+ * tasklet runs and do not need to rebuild each virtual tree
+ * and kick everyone again.
+ */
+ if (rq->engine != rq->hw_context->engine)
+ kick_siblings(rq);
}
static u64 execlists_update_context(struct i915_request *rq)
@@ -535,7 +602,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* that all ELSP are drained i.e. we have processed the CSB,
* before allowing ourselves to idle and calling intel_runtime_pm_put().
*/
- GEM_BUG_ON(!engine->i915->gt.awake);
+ GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref));
/*
* ELSQ note: the submit queue is not cleared after being submitted
@@ -659,6 +726,93 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists)
execlists));
}
+static void virtual_update_register_offsets(u32 *regs,
+ struct intel_engine_cs *engine)
+{
+ u32 base = engine->mmio_base;
+
+ /* Must match execlists_init_reg_state()! */
+
+ regs[CTX_CONTEXT_CONTROL] =
+ i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base));
+ regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base));
+ regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base));
+ regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base));
+ regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base));
+
+ regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base));
+ regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base));
+ regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base));
+ regs[CTX_SECOND_BB_HEAD_U] =
+ i915_mmio_reg_offset(RING_SBBADDR_UDW(base));
+ regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base));
+ regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base));
+
+ regs[CTX_CTX_TIMESTAMP] =
+ i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base));
+ regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3));
+ regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3));
+ regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2));
+ regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2));
+ regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1));
+ regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1));
+ regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
+ regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
+
+ if (engine->class == RENDER_CLASS) {
+ regs[CTX_RCS_INDIRECT_CTX] =
+ i915_mmio_reg_offset(RING_INDIRECT_CTX(base));
+ regs[CTX_RCS_INDIRECT_CTX_OFFSET] =
+ i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base));
+ regs[CTX_BB_PER_CTX_PTR] =
+ i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base));
+
+ regs[CTX_R_PWR_CLK_STATE] =
+ i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
+ }
+}
+
+static bool virtual_matches(const struct virtual_engine *ve,
+ const struct i915_request *rq,
+ const struct intel_engine_cs *engine)
+{
+ const struct intel_engine_cs *active;
+
+ if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
+ return false;
+
+ /*
+ * We track when the HW has completed saving the context image
+ * (i.e. when we have seen the final CS event switching out of
+ * the context) and must not overwrite the context image before
+ * then. This restricts us to only using the active engine
+ * while the previous virtualized request is inflight (so
+ * we reuse the register offsets). This is a very small
+ * hysteresis on the greedy selection algorithm.
+ */
+ active = READ_ONCE(ve->context.active);
+ if (active && active != engine)
+ return false;
+
+ return true;
+}
+
+static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
+ struct intel_engine_cs *engine)
+{
+ struct intel_engine_cs *old = ve->siblings[0];
+
+ /* All unattached (rq->engine == old) must already be completed */
+
+ spin_lock(&old->breadcrumbs.irq_lock);
+ if (!list_empty(&ve->context.signal_link)) {
+ list_move_tail(&ve->context.signal_link,
+ &engine->breadcrumbs.signalers);
+ intel_engine_queue_breadcrumbs(engine);
+ }
+ spin_unlock(&old->breadcrumbs.irq_lock);
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -691,6 +845,26 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
+ for (rb = rb_first_cached(&execlists->virtual); rb; ) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ if (!rq) { /* lazily cleanup after another engine handled rq */
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+ rb = rb_first_cached(&execlists->virtual);
+ continue;
+ }
+
+ if (!virtual_matches(ve, rq, engine)) {
+ rb = rb_next(rb);
+ continue;
+ }
+
+ break;
+ }
+
if (last) {
/*
* Don't resubmit or switch until all outstanding
@@ -712,7 +886,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
return;
- if (need_preempt(engine, last)) {
+ if (need_preempt(engine, last, rb)) {
inject_preempt_context(engine);
return;
}
@@ -752,6 +926,93 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last->tail = last->wa_tail;
}
+ while (rb) { /* XXX virtual is always taking precedence */
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq;
+
+ spin_lock(&ve->base.timeline.lock);
+
+ rq = ve->request;
+ if (unlikely(!rq)) { /* lost the race to a sibling */
+ spin_unlock(&ve->base.timeline.lock);
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+ rb = rb_first_cached(&execlists->virtual);
+ continue;
+ }
+
+ GEM_BUG_ON(rq != ve->request);
+ GEM_BUG_ON(rq->engine != &ve->base);
+ GEM_BUG_ON(rq->hw_context != &ve->context);
+
+ if (rq_prio(rq) >= queue_prio(execlists)) {
+ if (!virtual_matches(ve, rq, engine)) {
+ spin_unlock(&ve->base.timeline.lock);
+ rb = rb_next(rb);
+ continue;
+ }
+
+ if (last && !can_merge_rq(last, rq)) {
+ spin_unlock(&ve->base.timeline.lock);
+ return; /* leave this rq for another engine */
+ }
+
+ GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
+
+ ve->request = NULL;
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ GEM_BUG_ON(!(rq->execution_mask & engine->mask));
+ rq->engine = engine;
+
+ if (engine != ve->siblings[0]) {
+ u32 *regs = ve->context.lrc_reg_state;
+ unsigned int n;
+
+ GEM_BUG_ON(READ_ONCE(ve->context.active));
+ virtual_update_register_offsets(regs, engine);
+
+ if (!list_empty(&ve->context.signals))
+ virtual_xfer_breadcrumbs(ve, engine);
+
+ /*
+ * Move the bound engine to the top of the list
+ * for future execution. We then kick this
+ * tasklet first before checking others, so that
+ * we preferentially reuse this set of bound
+ * registers.
+ */
+ for (n = 1; n < ve->num_siblings; n++) {
+ if (ve->siblings[n] == engine) {
+ swap(ve->siblings[n],
+ ve->siblings[0]);
+ break;
+ }
+ }
+
+ GEM_BUG_ON(ve->siblings[0] != engine);
+ }
+
+ __i915_request_submit(rq);
+ trace_i915_request_in(rq, port_index(port, execlists));
+ submit = true;
+ last = rq;
+ }
+
+ spin_unlock(&ve->base.timeline.lock);
+ break;
+ }
+
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -1085,7 +1346,7 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_TRACE("%s awake?=%d, active=%x\n",
engine->name,
- !!engine->i915->gt.awake,
+ !!intel_wakeref_active(&engine->wakeref),
engine->execlists.active);
spin_lock_irqsave(&engine->timeline.lock, flags);
@@ -1232,7 +1493,7 @@ __execlists_update_reg_state(struct intel_context *ce,
/* RPCS */
if (engine->class == RENDER_CLASS)
regs[CTX_R_PWR_CLK_STATE + 1] =
- gen8_make_rpcs(engine->i915, &ce->sseu);
+ intel_sseu_make_rpcs(engine->i915, &ce->sseu);
}
static int
@@ -1316,6 +1577,9 @@ static const struct intel_context_ops execlists_context_ops = {
.pin = execlists_context_pin,
.unpin = execlists_context_unpin,
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
.reset = execlists_context_reset,
.destroy = execlists_context_destroy,
};
@@ -1695,8 +1959,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
unsigned int i;
int ret;
- if (GEM_DEBUG_WARN_ON(engine->id != RCS0))
- return -EINVAL;
+ if (engine->class != RENDER_CLASS)
+ return 0;
switch (INTEL_GEN(engine->i915)) {
case 11:
@@ -1787,7 +2051,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
return unexpected;
}
-static int gen8_init_common_ring(struct intel_engine_cs *engine)
+static int execlists_resume(struct intel_engine_cs *engine)
{
intel_engine_apply_workarounds(engine);
intel_engine_apply_whitelist(engine);
@@ -1820,7 +2084,7 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
* completed the reset in i915_gem_reset_finish(). If a request
* is completed by one engine, it may then queue a request
* to a second via its execlists->tasklet *just* as we are
- * calling engine->init_hw() and also writing the ELSP.
+ * calling engine->resume() and also writing the ELSP.
* Turning off the execlists->tasklet until the reset is over
* prevents the race.
*/
@@ -1872,6 +2136,25 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
&execlists->csb_status[reset_value]);
}
+static struct i915_request *active_request(struct i915_request *rq)
+{
+ const struct list_head * const list = &rq->engine->timeline.requests;
+ const struct intel_context * const context = rq->hw_context;
+ struct i915_request *active = NULL;
+
+ list_for_each_entry_from_reverse(rq, list, link) {
+ if (i915_request_completed(rq))
+ break;
+
+ if (rq->hw_context != context)
+ break;
+
+ active = rq;
+ }
+
+ return active;
+}
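+
+/*
+ * Worked example (illustrative): if the engine timeline holds
+ * { rq0, rq1, rq2 } for this context and only rq0 has completed,
+ * walking backwards from the request found in ELSP returns rq1, the
+ * oldest incomplete request and so the point at which the ring must
+ * resume after the reset.
+ */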
+
static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1892,7 +2175,8 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (!port_isset(execlists->port))
goto out_clear;
- ce = port_request(execlists->port)->hw_context;
+ rq = port_request(execlists->port);
+ ce = rq->hw_context;
/*
* Catch up with any missed context-switch interrupts.
@@ -1905,16 +2189,10 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
*/
execlists_cancel_port_requests(execlists);
- /* Push back any incomplete requests for replay after the reset. */
- rq = __unwind_incomplete_requests(engine);
+ rq = active_request(rq);
if (!rq)
goto out_replay;
- if (rq->hw_context != ce) { /* caught just before a CS event */
- rq = NULL;
- goto out_replay;
- }
-
/*
* If this request hasn't started yet, e.g. it is waiting on a
* semaphore, we need to avoid skipping the request or else we
@@ -1961,13 +2239,16 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
}
execlists_init_reg_state(regs, ce, engine, ce->ring);
- /* Rerun the request; its payload has been neutered (if guilty). */
out_replay:
+ /* Rerun the request; its payload has been neutered (if guilty). */
ce->ring->head =
rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
intel_ring_update_space(ce->ring);
__execlists_update_reg_state(ce, engine);
+ /* Push back any incomplete requests for replay after the reset. */
+ __unwind_incomplete_requests(engine);
+
out_clear:
execlists_clear_all_active(execlists);
}
@@ -2041,6 +2322,26 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
i915_priolist_free(p);
}
+ /* Cancel all attached virtual engines */
+ while ((rb = rb_first_cached(&execlists->virtual))) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ spin_lock(&ve->base.timeline.lock);
+ if (ve->request) {
+ ve->request->engine = engine;
+ __i915_request_submit(ve->request);
+ dma_fence_set_error(&ve->request->fence, -EIO);
+ i915_request_mark_complete(ve->request);
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ ve->request = NULL;
+ }
+ spin_unlock(&ve->base.timeline.lock);
+ }
+
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority_hint = INT_MIN;
@@ -2270,12 +2571,6 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
request->timeline->hwsp_offset,
0);
- cs = gen8_emit_ggtt_write(cs,
- intel_engine_next_hangcheck_seqno(request->engine),
- I915_GEM_HWS_HANGCHECK_ADDR,
- MI_FLUSH_DW_STORE_INDEX);
-
-
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2287,19 +2582,17 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
request->timeline->hwsp_offset,
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE |
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_CS_STALL);
-
- cs = gen8_emit_ggtt_write_rcs(cs,
- intel_engine_next_hangcheck_seqno(request->engine),
- I915_GEM_HWS_HANGCHECK_ADDR,
- PIPE_CONTROL_STORE_DATA_INDEX);
+ PIPE_CONTROL_DC_FLUSH_ENABLE);
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL,
+ 0);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2329,38 +2622,9 @@ static int gen8_init_rcs_context(struct i915_request *rq)
return i915_gem_render_state_emit(rq);
}
-/**
- * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
- * @engine: Engine Command Streamer.
- */
-void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
+static void execlists_park(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
-
- /*
- * Tasklet cannot be active at this point due intel_mark_active/idle
- * so this is just for documentation.
- */
- if (WARN_ON(test_bit(TASKLET_STATE_SCHED,
- &engine->execlists.tasklet.state)))
- tasklet_kill(&engine->execlists.tasklet);
-
- dev_priv = engine->i915;
-
- if (engine->buffer) {
- WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
- }
-
- if (engine->cleanup)
- engine->cleanup(engine);
-
- intel_engine_cleanup_common(engine);
-
- lrc_destroy_wa_ctx(engine);
-
- engine->i915 = NULL;
- dev_priv->engine[engine->id] = NULL;
- kfree(engine);
+ intel_engine_park(engine);
}
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
@@ -2374,7 +2638,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->reset.reset = execlists_reset;
engine->reset.finish = execlists_reset_finish;
- engine->park = NULL;
+ engine->park = execlists_park;
engine->unpark = NULL;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
@@ -2385,11 +2649,20 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
}
+static void execlists_destroy(struct intel_engine_cs *engine)
+{
+ intel_engine_cleanup_common(engine);
+ lrc_destroy_wa_ctx(engine);
+ kfree(engine);
+}
+
static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
	/* Default vfuncs which can be overridden by each engine. */
- engine->init_hw = gen8_init_common_ring;
+
+ engine->destroy = execlists_destroy;
+ engine->resume = execlists_resume;
engine->reset.prepare = execlists_reset_prepare;
engine->reset.reset = execlists_reset;
@@ -2442,15 +2715,8 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
-static int
-logical_ring_setup(struct intel_engine_cs *engine)
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
- int err;
-
- err = intel_engine_setup_common(engine);
- if (err)
- return err;
-
/* Intentionally left blank. */
engine->buffer = NULL;
@@ -2460,10 +2726,16 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
+ if (engine->class == RENDER_CLASS) {
+ engine->init_context = gen8_init_rcs_context;
+ engine->emit_flush = gen8_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ }
+
return 0;
}
-static int logical_ring_init(struct intel_engine_cs *engine)
+int intel_execlists_submission_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -2475,6 +2747,15 @@ static int logical_ring_init(struct intel_engine_cs *engine)
return ret;
intel_engine_init_workarounds(engine);
+ intel_engine_init_whitelist(engine);
+
+ if (intel_init_workaround_bb(engine))
+ /*
+ * We continue even if we fail to initialize WA batch
+ * because we only expect rare glitches but nothing
+		 * critical to prevent us from using the GPU.
+ */
+ DRM_ERROR("WA batch buffer initialization failed\n");
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = i915->uncore.regs +
@@ -2507,182 +2788,6 @@ static int logical_ring_init(struct intel_engine_cs *engine)
return 0;
}
-int logical_render_ring_init(struct intel_engine_cs *engine)
-{
- int ret;
-
- ret = logical_ring_setup(engine);
- if (ret)
- return ret;
-
- /* Override some for render ring. */
- engine->init_context = gen8_init_rcs_context;
- engine->emit_flush = gen8_emit_flush_render;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
-
- ret = logical_ring_init(engine);
- if (ret)
- return ret;
-
- ret = intel_init_workaround_bb(engine);
- if (ret) {
- /*
- * We continue even if we fail to initialize WA batch
- * because we only expect rare glitches but nothing
- * critical to prevent us from using GPU
- */
- DRM_ERROR("WA batch buffer initialization failed: %d\n",
- ret);
- }
-
- intel_engine_init_whitelist(engine);
-
- return 0;
-}
-
-int logical_xcs_ring_init(struct intel_engine_cs *engine)
-{
- int err;
-
- err = logical_ring_setup(engine);
- if (err)
- return err;
-
- return logical_ring_init(engine);
-}
-
-u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *req_sseu)
-{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
- bool subslice_pg = sseu->has_subslice_pg;
- struct intel_sseu ctx_sseu;
- u8 slices, subslices;
- u32 rpcs = 0;
-
- /*
- * No explicit RPCS request is needed to ensure full
- * slice/subslice/EU enablement prior to Gen9.
- */
- if (INTEL_GEN(i915) < 9)
- return 0;
-
- /*
- * If i915/perf is active, we want a stable powergating configuration
- * on the system.
- *
- * We could choose full enablement, but on ICL we know there are use
- * cases which disable slices for functional, apart for performance
- * reasons. So in this case we select a known stable subset.
- */
- if (!i915->perf.oa.exclusive_stream) {
- ctx_sseu = *req_sseu;
- } else {
- ctx_sseu = intel_device_default_sseu(i915);
-
- if (IS_GEN(i915, 11)) {
- /*
- * We only need subslice count so it doesn't matter
- * which ones we select - just turn off low bits in the
- * amount of half of all available subslices per slice.
- */
- ctx_sseu.subslice_mask =
- ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
- ctx_sseu.slice_mask = 0x1;
- }
- }
-
- slices = hweight8(ctx_sseu.slice_mask);
- subslices = hweight8(ctx_sseu.subslice_mask);
-
- /*
- * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
- * wide and Icelake has up to eight subslices, specfial programming is
- * needed in order to correctly enable all subslices.
- *
- * According to documentation software must consider the configuration
- * as 2x4x8 and hardware will translate this to 1x8x8.
- *
- * Furthemore, even though SScount is three bits, maximum documented
- * value for it is four. From this some rules/restrictions follow:
- *
- * 1.
- * If enabled subslice count is greater than four, two whole slices must
- * be enabled instead.
- *
- * 2.
- * When more than one slice is enabled, hardware ignores the subslice
- * count altogether.
- *
- * From these restrictions it follows that it is not possible to enable
- * a count of subslices between the SScount maximum of four restriction,
- * and the maximum available number on a particular SKU. Either all
- * subslices are enabled, or a count between one and four on the first
- * slice.
- */
- if (IS_GEN(i915, 11) &&
- slices == 1 &&
- subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
- GEM_BUG_ON(subslices & 1);
-
- subslice_pg = false;
- slices *= 2;
- }
-
- /*
- * Starting in Gen9, render power gating can leave
- * slice/subslice/EU in a partially enabled state. We
- * must make an explicit request through RPCS for full
- * enablement.
- */
- if (sseu->has_slice_pg) {
- u32 mask, val = slices;
-
- if (INTEL_GEN(i915) >= 11) {
- mask = GEN11_RPCS_S_CNT_MASK;
- val <<= GEN11_RPCS_S_CNT_SHIFT;
- } else {
- mask = GEN8_RPCS_S_CNT_MASK;
- val <<= GEN8_RPCS_S_CNT_SHIFT;
- }
-
- GEM_BUG_ON(val & ~mask);
- val &= mask;
-
- rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val;
- }
-
- if (subslice_pg) {
- u32 val = subslices;
-
- val <<= GEN8_RPCS_SS_CNT_SHIFT;
-
- GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK);
- val &= GEN8_RPCS_SS_CNT_MASK;
-
- rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
- }
-
- if (sseu->has_eu_pg) {
- u32 val;
-
- val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
- GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
- val &= GEN8_RPCS_EU_MIN_MASK;
-
- rpcs |= val;
-
- val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
- GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
- val &= GEN8_RPCS_EU_MAX_MASK;
-
- rpcs |= val;
-
- rpcs |= GEN8_RPCS_ENABLE;
- }
-
- return rpcs;
-}
-
static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
@@ -2721,12 +2826,15 @@ static void execlists_init_reg_state(u32 *regs,
bool rcs = engine->class == RENDER_CLASS;
u32 base = engine->mmio_base;
- /* A context is actually a big batch buffer with several
+ /*
+ * A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
* values we are setting here are only for the first context restore:
* on a subsequent save, the GPU will recreate this batchbuffer with new
* values (including all the missing MI_LOAD_REGISTER_IMM commands that
* we are not initializing here).
+ *
+ * Must keep consistent with virtual_update_register_offsets().
*/
regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
MI_LRI_FORCE_POSTED;
@@ -2945,6 +3053,448 @@ error_deref_obj:
return ret;
}
+static void virtual_context_destroy(struct kref *kref)
+{
+ struct virtual_engine *ve =
+ container_of(kref, typeof(*ve), context.ref);
+ unsigned int n;
+
+ GEM_BUG_ON(ve->request);
+ GEM_BUG_ON(ve->context.active);
+
+ for (n = 0; n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = ve->siblings[n];
+ struct rb_node *node = &ve->nodes[sibling->id].rb;
+
+ if (RB_EMPTY_NODE(node))
+ continue;
+
+ spin_lock_irq(&sibling->timeline.lock);
+
+ /* Detachment is lazily performed in the execlists tasklet */
+ if (!RB_EMPTY_NODE(node))
+ rb_erase_cached(node, &sibling->execlists.virtual);
+
+ spin_unlock_irq(&sibling->timeline.lock);
+ }
+ GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+
+ if (ve->context.state)
+ __execlists_context_fini(&ve->context);
+
+ kfree(ve->bonds);
+
+ i915_timeline_fini(&ve->base.timeline);
+ kfree(ve);
+}
+
+static void virtual_engine_initial_hint(struct virtual_engine *ve)
+{
+ int swp;
+
+ /*
+ * Pick a random sibling on starting to help spread the load around.
+ *
+ * New contexts are typically created with exactly the same order
+ * of siblings, and often started in batches. Due to the way we iterate
+	 * the array of siblings when submitting requests, sibling[0] is
+ * prioritised for dequeuing. If we make sure that sibling[0] is fairly
+ * randomised across the system, we also help spread the load by the
+ * first engine we inspect being different each time.
+ *
+ * NB This does not force us to execute on this engine, it will just
+ * typically be the first we inspect for submission.
+ */
+ swp = prandom_u32_max(ve->num_siblings);
+ if (!swp)
+ return;
+
+ swap(ve->siblings[swp], ve->siblings[0]);
+ virtual_update_register_offsets(ve->context.lrc_reg_state,
+ ve->siblings[0]);
+}
+
+static int virtual_context_pin(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ int err;
+
+ /* Note: we must use a real engine class for setting up reg state */
+ err = __execlists_context_pin(ce, ve->siblings[0]);
+ if (err)
+ return err;
+
+ virtual_engine_initial_hint(ve);
+ return 0;
+}
+
+static void virtual_context_enter(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_get(ve->siblings[n]);
+}
+
+static void virtual_context_exit(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+ unsigned int n;
+
+ ce->saturated = 0;
+ for (n = 0; n < ve->num_siblings; n++)
+ intel_engine_pm_put(ve->siblings[n]);
+}
+
+static const struct intel_context_ops virtual_context_ops = {
+ .pin = virtual_context_pin,
+ .unpin = execlists_context_unpin,
+
+ .enter = virtual_context_enter,
+ .exit = virtual_context_exit,
+
+ .destroy = virtual_context_destroy,
+};
+
+static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+{
+ struct i915_request *rq;
+ intel_engine_mask_t mask;
+
+ rq = READ_ONCE(ve->request);
+ if (!rq)
+ return 0;
+
+ /* The rq is ready for submission; rq->execution_mask is now stable. */
+ mask = rq->execution_mask;
+ if (unlikely(!mask)) {
+ /* Invalid selection, submit to a random engine in error */
+ i915_request_skip(rq, -ENODEV);
+ mask = ve->siblings[0]->mask;
+ }
+
+ GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n",
+ ve->base.name,
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.execlists.queue_priority_hint);
+
+ return mask;
+}
+
+static void virtual_submission_tasklet(unsigned long data)
+{
+ struct virtual_engine * const ve = (struct virtual_engine *)data;
+ const int prio = ve->base.execlists.queue_priority_hint;
+ intel_engine_mask_t mask;
+ unsigned int n;
+
+ rcu_read_lock();
+ mask = virtual_submission_mask(ve);
+ rcu_read_unlock();
+ if (unlikely(!mask))
+ return;
+
+ local_irq_disable();
+ for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) {
+ struct intel_engine_cs *sibling = ve->siblings[n];
+ struct ve_node * const node = &ve->nodes[sibling->id];
+ struct rb_node **parent, *rb;
+ bool first;
+
+ if (unlikely(!(mask & sibling->mask))) {
+ if (!RB_EMPTY_NODE(&node->rb)) {
+ spin_lock(&sibling->timeline.lock);
+ rb_erase_cached(&node->rb,
+ &sibling->execlists.virtual);
+ RB_CLEAR_NODE(&node->rb);
+ spin_unlock(&sibling->timeline.lock);
+ }
+ continue;
+ }
+
+ spin_lock(&sibling->timeline.lock);
+
+ if (!RB_EMPTY_NODE(&node->rb)) {
+ /*
+ * Cheat and avoid rebalancing the tree if we can
+ * reuse this node in situ.
+ */
+ first = rb_first_cached(&sibling->execlists.virtual) ==
+ &node->rb;
+ if (prio == node->prio || (prio > node->prio && first))
+ goto submit_engine;
+
+ rb_erase_cached(&node->rb, &sibling->execlists.virtual);
+ }
+
+ rb = NULL;
+ first = true;
+ parent = &sibling->execlists.virtual.rb_root.rb_node;
+ while (*parent) {
+ struct ve_node *other;
+
+ rb = *parent;
+ other = rb_entry(rb, typeof(*other), rb);
+ if (prio > other->prio) {
+ parent = &rb->rb_left;
+ } else {
+ parent = &rb->rb_right;
+ first = false;
+ }
+ }
+
+ rb_link_node(&node->rb, rb, parent);
+ rb_insert_color_cached(&node->rb,
+ &sibling->execlists.virtual,
+ first);
+
+submit_engine:
+ GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
+ node->prio = prio;
+ if (first && prio > sibling->execlists.queue_priority_hint) {
+ sibling->execlists.queue_priority_hint = prio;
+ tasklet_hi_schedule(&sibling->execlists.tasklet);
+ }
+
+ spin_unlock(&sibling->timeline.lock);
+ }
+ local_irq_enable();
+}
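+
+/*
+ * Note (illustrative): each virtual engine keeps one ve_node per
+ * physical sibling, so a single ready request may be advertised in
+ * several execlists.virtual rb-trees at once; whichever sibling
+ * dequeues it first clears ve->request and the others lazily erase
+ * their stale nodes (see execlists_dequeue()).
+ */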
+
+static void virtual_submit_request(struct i915_request *rq)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+
+ GEM_TRACE("%s: rq=%llx:%lld\n",
+ ve->base.name,
+ rq->fence.context,
+ rq->fence.seqno);
+
+ GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
+
+ GEM_BUG_ON(ve->request);
+ ve->base.execlists.queue_priority_hint = rq_prio(rq);
+ WRITE_ONCE(ve->request, rq);
+
+ tasklet_schedule(&ve->base.execlists.tasklet);
+}
+
+static struct ve_bond *
+virtual_find_bond(struct virtual_engine *ve,
+ const struct intel_engine_cs *master)
+{
+ int i;
+
+ for (i = 0; i < ve->num_bonds; i++) {
+ if (ve->bonds[i].master == master)
+ return &ve->bonds[i];
+ }
+
+ return NULL;
+}
+
+static void
+virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
+{
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ struct ve_bond *bond;
+
+ bond = virtual_find_bond(ve, to_request(signal)->engine);
+ if (bond) {
+ intel_engine_mask_t old, new, cmp;
+
+ cmp = READ_ONCE(rq->execution_mask);
+ do {
+ old = cmp;
+ new = cmp & bond->sibling_mask;
+ } while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old);
+ }
+}
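+
+/*
+ * Worked example (illustrative): if a bonded request may run on any of
+ * vcs0-2 (execution_mask == 0x7) but the bond registered for this
+ * master allows only vcs1 (sibling_mask == 0x2), the cmpxchg loop
+ * atomically narrows execution_mask to 0x2, restricting the virtual
+ * engine to the bonded sibling.
+ */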
+
+struct intel_context *
+intel_execlists_create_virtual(struct i915_gem_context *ctx,
+ struct intel_engine_cs **siblings,
+ unsigned int count)
+{
+ struct virtual_engine *ve;
+ unsigned int n;
+ int err;
+
+ if (count == 0)
+ return ERR_PTR(-EINVAL);
+
+ if (count == 1)
+ return intel_context_create(ctx, siblings[0]);
+
+ ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
+ if (!ve)
+ return ERR_PTR(-ENOMEM);
+
+ ve->base.i915 = ctx->i915;
+ ve->base.id = -1;
+ ve->base.class = OTHER_CLASS;
+ ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
+ ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+
+ snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+
+ err = i915_timeline_init(ctx->i915, &ve->base.timeline, NULL);
+ if (err)
+ goto err_put;
+ i915_timeline_set_subclass(&ve->base.timeline, TIMELINE_VIRTUAL);
+
+ intel_engine_init_execlists(&ve->base);
+
+ ve->base.cops = &virtual_context_ops;
+ ve->base.request_alloc = execlists_request_alloc;
+
+ ve->base.schedule = i915_schedule;
+ ve->base.submit_request = virtual_submit_request;
+ ve->base.bond_execute = virtual_bond_execute;
+
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ tasklet_init(&ve->base.execlists.tasklet,
+ virtual_submission_tasklet,
+ (unsigned long)ve);
+
+ intel_context_init(&ve->context, ctx, &ve->base);
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *sibling = siblings[n];
+
+ GEM_BUG_ON(!is_power_of_2(sibling->mask));
+ if (sibling->mask & ve->base.mask) {
+ DRM_DEBUG("duplicate %s entry in load balancer\n",
+ sibling->name);
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ /*
+ * The virtual engine implementation is tightly coupled to
+		 * the execlists backend -- we push requests directly
+ * into a tree inside each physical engine. We could support
+ * layering if we handle cloning of the requests and
+ * submitting a copy into each backend.
+ */
+ if (sibling->execlists.tasklet.func !=
+ execlists_submission_tasklet) {
+ err = -ENODEV;
+ goto err_put;
+ }
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
+ RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
+
+ ve->siblings[ve->num_siblings++] = sibling;
+ ve->base.mask |= sibling->mask;
+
+ /*
+ * All physical engines must be compatible for their emission
+ * functions (as we build the instructions during request
+ * construction and do not alter them before submission
+ * on the physical engine). We use the engine class as a guide
+ * here, although that could be refined.
+ */
+ if (ve->base.class != OTHER_CLASS) {
+ if (ve->base.class != sibling->class) {
+ DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
+ sibling->class, ve->base.class);
+ err = -EINVAL;
+ goto err_put;
+ }
+ continue;
+ }
+
+ ve->base.class = sibling->class;
+ ve->base.uabi_class = sibling->uabi_class;
+ snprintf(ve->base.name, sizeof(ve->base.name),
+ "v%dx%d", ve->base.class, count);
+ ve->base.context_size = sibling->context_size;
+
+ ve->base.emit_bb_start = sibling->emit_bb_start;
+ ve->base.emit_flush = sibling->emit_flush;
+ ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
+ ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
+ ve->base.emit_fini_breadcrumb_dw =
+ sibling->emit_fini_breadcrumb_dw;
+ }
+
+ return &ve->context;
+
+err_put:
+ intel_context_put(&ve->context);
+ return ERR_PTR(err);
+}
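+
+/*
+ * Usage sketch (illustrative only, assuming both video decode engines
+ * are present on the device):
+ *
+ *	struct intel_engine_cs *siblings[] = {
+ *		i915->engine[VCS0],
+ *		i915->engine[VCS1],
+ *	};
+ *	struct intel_context *ce;
+ *
+ *	ce = intel_execlists_create_virtual(ctx, siblings, 2);
+ *	if (IS_ERR(ce))
+ *		return PTR_ERR(ce);
+ */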
+
+struct intel_context *
+intel_execlists_clone_virtual(struct i915_gem_context *ctx,
+ struct intel_engine_cs *src)
+{
+ struct virtual_engine *se = to_virtual_engine(src);
+ struct intel_context *dst;
+
+ dst = intel_execlists_create_virtual(ctx,
+ se->siblings,
+ se->num_siblings);
+ if (IS_ERR(dst))
+ return dst;
+
+ if (se->num_bonds) {
+ struct virtual_engine *de = to_virtual_engine(dst->engine);
+
+ de->bonds = kmemdup(se->bonds,
+ sizeof(*se->bonds) * se->num_bonds,
+ GFP_KERNEL);
+ if (!de->bonds) {
+ intel_context_put(dst);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ de->num_bonds = se->num_bonds;
+ }
+
+ return dst;
+}
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+ struct ve_bond *bond;
+ int n;
+
+ /* Sanity check the sibling is part of the virtual engine */
+ for (n = 0; n < ve->num_siblings; n++)
+ if (sibling == ve->siblings[n])
+ break;
+ if (n == ve->num_siblings)
+ return -EINVAL;
+
+ bond = virtual_find_bond(ve, master);
+ if (bond) {
+ bond->sibling_mask |= sibling->mask;
+ return 0;
+ }
+
+ bond = krealloc(ve->bonds,
+ sizeof(*bond) * (ve->num_bonds + 1),
+ GFP_KERNEL);
+ if (!bond)
+ return -ENOMEM;
+
+ bond[ve->num_bonds].master = master;
+ bond[ve->num_bonds].sibling_mask = sibling->mask;
+
+ ve->bonds = bond;
+ ve->num_bonds++;
+
+ return 0;
+}
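+
+/*
+ * Usage sketch (illustrative): to bond vcs1 to requests whose master
+ * runs on vcs0, call intel_virtual_engine_attach_bond(ve, vcs0, vcs1);
+ * repeated calls with the same master simply OR further siblings into
+ * the existing bond mask.
+ */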
+
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
@@ -3002,6 +3552,29 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
show_request(m, last, "\t\tQ ");
}
+ last = NULL;
+ count = 0;
+ for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ if (rq) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\tV ");
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d virtual requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\tV ");
+ }
+
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
@@ -3037,5 +3610,5 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_lrc.c"
+#include "selftest_lrc.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 84aa230ea27b..e029aee87adf 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -24,8 +24,7 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
-#include "intel_ringbuffer.h"
-#include "i915_gem_context.h"
+#include "intel_engine.h"
/* Execlists regs */
#define RING_ELSP(base) _MMIO((base) + 0x230)
@@ -67,8 +66,9 @@ enum {
/* Logical Rings */
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
-int logical_render_ring_init(struct intel_engine_cs *engine);
-int logical_xcs_ring_init(struct intel_engine_cs *engine);
+
+int intel_execlists_submission_setup(struct intel_engine_cs *engine);
+int intel_execlists_submission_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
@@ -99,7 +99,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
struct drm_printer;
struct drm_i915_private;
-struct i915_gem_context;
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
@@ -115,6 +114,17 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
const char *prefix),
unsigned int max);
-u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *ctx_sseu);
+struct intel_context *
+intel_execlists_create_virtual(struct i915_gem_context *ctx,
+ struct intel_engine_cs **siblings,
+ unsigned int count);
+
+struct intel_context *
+intel_execlists_clone_virtual(struct i915_gem_context *ctx,
+ struct intel_engine_cs *src);
+
+int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
+ const struct intel_engine_cs *master,
+ const struct intel_engine_cs *sibling);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 5ef932d810a7..5ef932d810a7 100644
--- a/drivers/gpu/drm/i915/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 274ba78500c0..79df66022d3a 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -20,9 +20,11 @@
* SOFTWARE.
*/
+#include "i915_drv.h"
+
+#include "intel_engine.h"
#include "intel_mocs.h"
#include "intel_lrc.h"
-#include "intel_ringbuffer.h"
/* structures required */
struct drm_i915_mocs_entry {
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 3d99d1271b2b..0913704a1af2 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -49,7 +49,9 @@
* context handling keep the MOCS in step.
*/
-#include "i915_drv.h"
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
int intel_rcs_context_init_mocs(struct i915_request *rq);
void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 677d59304e78..8c60f7550f9c 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -9,9 +9,13 @@
#include "i915_drv.h"
#include "i915_gpu_error.h"
-#include "i915_reset.h"
+#include "i915_irq.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_pm.h"
+#include "intel_reset.h"
#include "intel_guc.h"
+#include "intel_overlay.h"
#define RESET_MAX_RETRIES 3
@@ -641,9 +645,6 @@ int intel_gpu_reset(struct drm_i915_private *i915,
bool intel_has_gpu_reset(struct drm_i915_private *i915)
{
- if (USES_GUC(i915))
- return false;
-
if (!i915_modparams.reset)
		return false;
@@ -683,6 +684,7 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
+ intel_engine_pm_get(engine);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
@@ -718,6 +720,7 @@ static void reset_prepare(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_gt_pm_get(i915);
for_each_engine(engine, i915, id)
reset_prepare_engine(engine);
@@ -755,48 +758,10 @@ static int gt_reset(struct drm_i915_private *i915,
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
+ intel_engine_pm_put(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
-struct i915_gpu_restart {
- struct work_struct work;
- struct drm_i915_private *i915;
-};
-
-static void restart_work(struct work_struct *work)
-{
- struct i915_gpu_restart *arg = container_of(work, typeof(*arg), work);
- struct drm_i915_private *i915 = arg->i915;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
-
- wakeref = intel_runtime_pm_get(i915);
- mutex_lock(&i915->drm.struct_mutex);
- WRITE_ONCE(i915->gpu_error.restart, NULL);
-
- for_each_engine(engine, i915, id) {
- struct i915_request *rq;
-
- /*
- * Ostensibily, we always want a context loaded for powersaving,
- * so if the engine is idle after the reset, send a request
- * to load our scratch kernel_context.
- */
- if (!intel_engine_is_idle(engine))
- continue;
-
- rq = i915_request_alloc(engine, i915->kernel_context);
- if (!IS_ERR(rq))
- i915_request_add(rq);
- }
-
- mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915, wakeref);
-
- kfree(arg);
-}
-
static void reset_finish(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -806,29 +771,7 @@ static void reset_finish(struct drm_i915_private *i915)
reset_finish_engine(engine);
intel_engine_signal_breadcrumbs(engine);
}
-}
-
-static void reset_restart(struct drm_i915_private *i915)
-{
- struct i915_gpu_restart *arg;
-
- /*
- * Following the reset, ensure that we always reload context for
- * powersaving, and to correct engine->last_retired_context. Since
- * this requires us to submit a request, queue a worker to do that
- * task for us to evade any locking here.
- */
- if (READ_ONCE(i915->gpu_error.restart))
- return;
-
- arg = kmalloc(sizeof(*arg), GFP_KERNEL);
- if (arg) {
- arg->i915 = i915;
- INIT_WORK(&arg->work, restart_work);
-
- WRITE_ONCE(i915->gpu_error.restart, arg);
- queue_work(i915->wq, &arg->work);
- }
+ intel_gt_pm_put(i915);
}
static void nop_submit_request(struct i915_request *request)
@@ -889,6 +832,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* in nop_submit_request.
*/
synchronize_rcu_expedited();
+ set_bit(I915_WEDGED, &error->flags);
/* Mark all executing requests as skipped */
for_each_engine(engine, i915, id)
@@ -896,9 +840,6 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
reset_finish(i915);
- smp_mb__before_atomic();
- set_bit(I915_WEDGED, &error->flags);
-
GEM_TRACE("end\n");
}
@@ -956,7 +897,7 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
}
mutex_unlock(&i915->gt.timelines.mutex);
- intel_engines_sanitize(i915, false);
+ intel_gt_sanitize(i915, false);
/*
* Undo nop_submit_request. We prevent all new i915 requests from
@@ -1034,7 +975,6 @@ void i915_reset(struct drm_i915_private *i915,
GEM_TRACE("flags=%lx\n", error->flags);
might_sleep();
- assert_rpm_wakelock_held(i915);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
/* Clear any previous failed attempts at recovery. Time to try again. */
@@ -1087,8 +1027,6 @@ void i915_reset(struct drm_i915_private *i915,
finish:
reset_finish(i915);
- if (!__i915_wedged(error))
- reset_restart(i915);
return;
taint:
@@ -1104,7 +1042,7 @@ taint:
* rather than continue on into oblivion. For everyone else,
* the system should still plod along, but they have been warned!
*/
- add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+ add_taint_for_CI(TAINT_WARN);
error:
__i915_gem_set_wedged(i915);
goto finish;
@@ -1137,6 +1075,9 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+ if (!intel_wakeref_active(&engine->wakeref))
+ return 0;
+
reset_prepare_engine(engine);
if (msg)
@@ -1168,7 +1109,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
* have been reset to their default values. Follow the init_ring
* process to program RING_MODE, HWSP and re-enable submission.
*/
- ret = engine->init_hw(engine);
+ ret = engine->resume(engine);
if (ret)
goto out;
@@ -1425,25 +1366,6 @@ int i915_terminally_wedged(struct drm_i915_private *i915)
return __i915_wedged(error) ? -EIO : 0;
}
-bool i915_reset_flush(struct drm_i915_private *i915)
-{
- int err;
-
- cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
-
- flush_workqueue(i915->wq);
- GEM_BUG_ON(READ_ONCE(i915->gpu_error.restart));
-
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- MAX_SCHEDULE_TIMEOUT);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return !err;
-}
-
static void i915_wedge_me(struct work_struct *work)
{
struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
@@ -1472,3 +1394,7 @@ void __i915_fini_wedge(struct i915_wedge_me *w)
destroy_delayed_work_on_stack(&w->work);
w->i915 = NULL;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_reset.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 3c0450289b8f..b52efaab4941 100644
--- a/drivers/gpu/drm/i915/i915_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -11,7 +11,7 @@
#include <linux/types.h>
#include <linux/srcu.h>
-#include "intel_engine_types.h"
+#include "gt/intel_engine_types.h"
struct drm_i915_private;
struct i915_request;
@@ -34,7 +34,6 @@ int i915_reset_engine(struct intel_engine_cs *engine,
const char *reason);
void i915_reset_request(struct i915_request *rq, bool guilty);
-bool i915_reset_flush(struct drm_i915_private *i915);
int __must_check i915_reset_trylock(struct drm_i915_private *i915);
void i915_reset_unlock(struct drm_i915_private *i915, int tag);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 029fd8ec1857..f0d60affdba3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -33,9 +33,8 @@
#include "i915_drv.h"
#include "i915_gem_render_state.h"
-#include "i915_reset.h"
#include "i915_trace.h"
-#include "intel_drv.h"
+#include "intel_reset.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
@@ -310,11 +309,6 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX;
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -416,13 +410,6 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->timeline->hwsp_offset;
*cs++ = rq->fence.seqno;
- *cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = (PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_STORE_DATA_INDEX |
- PIPE_CONTROL_GLOBAL_GTT_IVB);
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -441,12 +428,7 @@ static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
*cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -466,10 +448,6 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
for (i = 0; i < GEN7_XCS_WA; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
@@ -481,6 +459,7 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = 0;
*cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -638,12 +617,15 @@ static bool stop_ring(struct intel_engine_cs *engine)
return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
-static int init_ring_common(struct intel_engine_cs *engine)
+static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->buffer;
int ret = 0;
+ GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
+ engine->name, ring->head, ring->tail);
+
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
if (!stop_ring(engine)) {
@@ -828,12 +810,23 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
return 0;
}
-static int init_render_ring(struct intel_engine_cs *engine)
+static int rcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- int ret = init_ring_common(engine);
- if (ret)
- return ret;
+
+ /*
+ * Disable CONSTANT_BUFFER before it is loaded from the context
+	 * image. As it is loaded, it is executed and the stored
+ * address may no longer be valid, leading to a GPU hang.
+ *
+ * This imposes the requirement that userspace reload their
+ * CONSTANT_BUFFER on every batch, fortunately a requirement
+ * they are already accustomed to from before contexts were
+ * enabled.
+ */
+ if (IS_GEN(dev_priv, 4))
+ I915_WRITE(ECOSKPD,
+ _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN_RANGE(dev_priv, 4, 6))
@@ -873,10 +866,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (IS_GEN_RANGE(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (INTEL_GEN(dev_priv) >= 6)
- ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
-
- return 0;
+ return xcs_resume(engine);
}
static void cancel_requests(struct intel_engine_cs *engine)
@@ -918,11 +908,8 @@ static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
*cs++ = rq->fence.seqno;
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
*cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -940,10 +927,6 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = MI_FLUSH;
- *cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
- *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
-
BUILD_BUG_ON(GEN5_WA_STORES < 1);
for (i = 0; i < GEN5_WA_STORES; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
@@ -952,7 +935,6 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
}
*cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
@@ -1517,77 +1499,13 @@ static const struct intel_context_ops ring_context_ops = {
.pin = ring_context_pin,
.unpin = ring_context_unpin,
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
.reset = ring_context_reset,
.destroy = ring_context_destroy,
};
-static int intel_init_ring_buffer(struct intel_engine_cs *engine)
-{
- struct i915_timeline *timeline;
- struct intel_ring *ring;
- int err;
-
- err = intel_engine_setup_common(engine);
- if (err)
- return err;
-
- timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
- if (IS_ERR(timeline)) {
- err = PTR_ERR(timeline);
- goto err;
- }
- GEM_BUG_ON(timeline->has_initial_breadcrumb);
-
- ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
- i915_timeline_put(timeline);
- if (IS_ERR(ring)) {
- err = PTR_ERR(ring);
- goto err;
- }
-
- err = intel_ring_pin(ring);
- if (err)
- goto err_ring;
-
- GEM_BUG_ON(engine->buffer);
- engine->buffer = ring;
-
- err = intel_engine_init_common(engine);
- if (err)
- goto err_unpin;
-
- GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
-
- return 0;
-
-err_unpin:
- intel_ring_unpin(ring);
-err_ring:
- intel_ring_put(ring);
-err:
- intel_engine_cleanup_common(engine);
- return err;
-}
-
-void intel_engine_cleanup(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- WARN_ON(INTEL_GEN(dev_priv) > 2 &&
- (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
-
- intel_ring_unpin(engine->buffer);
- intel_ring_put(engine->buffer);
-
- if (engine->cleanup)
- engine->cleanup(engine);
-
- intel_engine_cleanup_common(engine);
-
- dev_priv->engine[engine->id] = NULL;
- kfree(engine);
-}
-
static int load_pd_dir(struct i915_request *rq,
const struct i915_hw_ppgtt *ppgtt)
{
@@ -1646,11 +1564,14 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* These flags are for resource streamer on HSW+ */
flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
else
+ /* We need to save the extended state for powersaving modes */
flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
len = 4;
if (IS_GEN(i915, 7))
len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
+ else if (IS_GEN(i915, 5))
+ len += 2;
if (flags & MI_FORCE_RESTORE) {
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
flags &= ~MI_FORCE_RESTORE;
@@ -1679,6 +1600,14 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
+ } else if (IS_GEN(i915, 5)) {
+ /*
+ * This w/a is only listed for pre-production ilk a/b steppings,
+ * but is also mentioned for programming the powerctx. To be
+ * safe, just apply the workaround; we do not use SyncFlush so
+ * this should never take effect and so be a no-op!
+ */
+ *cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
}
if (force_restore) {
@@ -1732,6 +1661,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ } else if (IS_GEN(i915, 5)) {
+ *cs++ = MI_SUSPEND_FLUSH;
}
intel_ring_advance(rq, cs);
@@ -1776,7 +1707,6 @@ static int switch_context(struct i915_request *rq)
u32 hw_flags = 0;
int ret, i;
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
if (ppgtt) {
@@ -1888,12 +1818,12 @@ static int ring_request_alloc(struct i915_request *request)
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- ret = switch_context(request);
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ ret = switch_context(request);
if (ret)
return ret;
@@ -1906,8 +1836,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
struct i915_request *target;
long timeout;
- lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
-
if (intel_ring_update_space(ring) >= bytes)
return 0;
@@ -2167,24 +2095,6 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
-static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(dev_priv) >= 6) {
- engine->irq_enable = gen6_irq_enable;
- engine->irq_disable = gen6_irq_disable;
- } else if (INTEL_GEN(dev_priv) >= 5) {
- engine->irq_enable = gen5_irq_enable;
- engine->irq_disable = gen5_irq_disable;
- } else if (INTEL_GEN(dev_priv) >= 3) {
- engine->irq_enable = i9xx_irq_enable;
- engine->irq_disable = i9xx_irq_disable;
- } else {
- engine->irq_enable = i8xx_irq_enable;
- engine->irq_disable = i8xx_irq_disable;
- }
-}
-
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
@@ -2200,15 +2110,51 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = gen6_bsd_submit_request;
}
-static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static void ring_destroy(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ WARN_ON(INTEL_GEN(dev_priv) > 2 &&
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+ intel_ring_unpin(engine->buffer);
+ intel_ring_put(engine->buffer);
+
+ intel_engine_cleanup_common(engine);
+ kfree(engine);
+}
+
+static void setup_irq(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (INTEL_GEN(i915) >= 6) {
+ engine->irq_enable = gen6_irq_enable;
+ engine->irq_disable = gen6_irq_disable;
+ } else if (INTEL_GEN(i915) >= 5) {
+ engine->irq_enable = gen5_irq_enable;
+ engine->irq_disable = gen5_irq_disable;
+ } else if (INTEL_GEN(i915) >= 3) {
+ engine->irq_enable = i9xx_irq_enable;
+ engine->irq_disable = i9xx_irq_disable;
+ } else {
+ engine->irq_enable = i8xx_irq_enable;
+ engine->irq_disable = i8xx_irq_disable;
+ }
+}
+
+static void setup_common(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
/* gen8+ are only supported with execlists */
- GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
+ GEM_BUG_ON(INTEL_GEN(i915) >= 8);
- intel_ring_init_irq(dev_priv, engine);
+ setup_irq(engine);
- engine->init_hw = init_ring_common;
+ engine->destroy = ring_destroy;
+
+ engine->resume = xcs_resume;
engine->reset.prepare = reset_prepare;
engine->reset.reset = reset_ring;
engine->reset.finish = reset_finish;
@@ -2222,117 +2168,96 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
* engine->emit_init_breadcrumb().
*/
engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
- if (IS_GEN(dev_priv, 5))
+ if (IS_GEN(i915, 5))
engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
engine->set_default_submission = i9xx_set_default_submission;
- if (INTEL_GEN(dev_priv) >= 6)
+ if (INTEL_GEN(i915) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
- else if (INTEL_GEN(dev_priv) >= 4)
+ else if (INTEL_GEN(i915) >= 4)
engine->emit_bb_start = i965_emit_bb_start;
- else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
+ else if (IS_I830(i915) || IS_I845G(i915))
engine->emit_bb_start = i830_emit_bb_start;
else
engine->emit_bb_start = i915_emit_bb_start;
}
-int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
+static void setup_rcs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- intel_ring_default_vfuncs(dev_priv, engine);
+ struct drm_i915_private *i915 = engine->i915;
- if (HAS_L3_DPF(dev_priv))
+ if (HAS_L3_DPF(i915))
engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- if (INTEL_GEN(dev_priv) >= 7) {
+ if (INTEL_GEN(i915) >= 7) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
- } else if (IS_GEN(dev_priv, 6)) {
+ } else if (IS_GEN(i915, 6)) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen6_render_ring_flush;
engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
- } else if (IS_GEN(dev_priv, 5)) {
+ } else if (IS_GEN(i915, 5)) {
engine->emit_flush = gen4_render_ring_flush;
} else {
- if (INTEL_GEN(dev_priv) < 4)
+ if (INTEL_GEN(i915) < 4)
engine->emit_flush = gen2_render_ring_flush;
else
engine->emit_flush = gen4_render_ring_flush;
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
- if (IS_HASWELL(dev_priv))
+ if (IS_HASWELL(i915))
engine->emit_bb_start = hsw_emit_bb_start;
- engine->init_hw = init_render_ring;
-
- ret = intel_init_ring_buffer(engine);
- if (ret)
- return ret;
-
- return 0;
+ engine->resume = rcs_resume;
}
-int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
+static void setup_vcs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- intel_ring_default_vfuncs(dev_priv, engine);
+ struct drm_i915_private *i915 = engine->i915;
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(i915) >= 6) {
/* gen6 bsd needs a special wa for tail updates */
- if (IS_GEN(dev_priv, 6))
+ if (IS_GEN(i915, 6))
engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_bsd_ring_flush;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
- if (IS_GEN(dev_priv, 6))
+ if (IS_GEN(i915, 6))
engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
else
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
} else {
engine->emit_flush = bsd_ring_flush;
- if (IS_GEN(dev_priv, 5))
+ if (IS_GEN(i915, 5))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
}
-
- return intel_init_ring_buffer(engine);
}
-int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
+static void setup_bcs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
-
- intel_ring_default_vfuncs(dev_priv, engine);
+ struct drm_i915_private *i915 = engine->i915;
engine->emit_flush = gen6_ring_flush;
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- if (IS_GEN(dev_priv, 6))
+ if (IS_GEN(i915, 6))
engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
else
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
-
- return intel_init_ring_buffer(engine);
}
-int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
+static void setup_vecs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);
+ struct drm_i915_private *i915 = engine->i915;
- intel_ring_default_vfuncs(dev_priv, engine);
+ GEM_BUG_ON(INTEL_GEN(i915) < 7);
engine->emit_flush = gen6_ring_flush;
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
@@ -2340,6 +2265,73 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
engine->irq_disable = hsw_vebox_irq_disable;
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+}
- return intel_init_ring_buffer(engine);
+int intel_ring_submission_setup(struct intel_engine_cs *engine)
+{
+ setup_common(engine);
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ setup_rcs(engine);
+ break;
+ case VIDEO_DECODE_CLASS:
+ setup_vcs(engine);
+ break;
+ case COPY_ENGINE_CLASS:
+ setup_bcs(engine);
+ break;
+ case VIDEO_ENHANCEMENT_CLASS:
+ setup_vecs(engine);
+ break;
+ default:
+ MISSING_CASE(engine->class);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int intel_ring_submission_init(struct intel_engine_cs *engine)
+{
+ struct i915_timeline *timeline;
+ struct intel_ring *ring;
+ int err;
+
+ timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
+ if (IS_ERR(timeline)) {
+ err = PTR_ERR(timeline);
+ goto err;
+ }
+ GEM_BUG_ON(timeline->has_initial_breadcrumb);
+
+ ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
+ i915_timeline_put(timeline);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto err;
+ }
+
+ err = intel_ring_pin(ring);
+ if (err)
+ goto err_ring;
+
+ GEM_BUG_ON(engine->buffer);
+ engine->buffer = ring;
+
+ err = intel_engine_init_common(engine);
+ if (err)
+ goto err_unpin;
+
+ GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
+
+ return 0;
+
+err_unpin:
+ intel_ring_unpin(ring);
+err_ring:
+ intel_ring_put(ring);
+err:
+ intel_engine_cleanup_common(engine);
+ return err;
}
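
The error unwinding in intel_ring_submission_init() above is the kernel's usual goto-ladder idiom: each acquired resource gets a label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A minimal self-contained sketch of the same pattern, with hypothetical acquire()/release() helpers standing in for the timeline and ring state:

#include <stdlib.h>

/* Hypothetical resources standing in for the timeline and ring above */
struct res { int dummy; };

static struct res *acquire(void) { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

static int init(void)
{
	struct res *a, *b;
	int err;

	a = acquire();
	if (!a) {
		err = -12; /* -ENOMEM */
		goto err;
	}

	b = acquire();
	if (!b) {
		err = -12;
		goto err_a;
	}

	/* success: ownership passes to the caller, as in the driver */
	return 0;

err_a:
	release(a); /* unwind in reverse order of acquisition */
err:
	return err;
}

int main(void) { return init() ? 1 : 0; }
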
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
new file mode 100644
index 000000000000..7f448f3bea0b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -0,0 +1,142 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_lrc_reg.h"
+#include "intel_sseu.h"
+
+u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
+ const struct intel_sseu *req_sseu)
+{
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ bool subslice_pg = sseu->has_subslice_pg;
+ struct intel_sseu ctx_sseu;
+ u8 slices, subslices;
+ u32 rpcs = 0;
+
+ /*
+ * No explicit RPCS request is needed to ensure full
+ * slice/subslice/EU enablement prior to Gen9.
+ */
+ if (INTEL_GEN(i915) < 9)
+ return 0;
+
+ /*
+ * If i915/perf is active, we want a stable powergating configuration
+ * on the system.
+ *
+ * We could choose full enablement, but on ICL we know there are use
+ * cases which disable slices for functional reasons, apart from
+ * performance ones. So in this case we select a known stable subset.
+ */
+ if (!i915->perf.oa.exclusive_stream) {
+ ctx_sseu = *req_sseu;
+ } else {
+ ctx_sseu = intel_sseu_from_device_info(sseu);
+
+ if (IS_GEN(i915, 11)) {
+ /*
+ * We only need the subslice count, so it doesn't matter
+ * which ones we select - just keep the low half of all
+ * available subslices per slice enabled.
+ */
+ ctx_sseu.subslice_mask =
+ ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
+ ctx_sseu.slice_mask = 0x1;
+ }
+ }
+
+ slices = hweight8(ctx_sseu.slice_mask);
+ subslices = hweight8(ctx_sseu.subslice_mask);
+
+ /*
+ * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
+ * wide and Icelake has up to eight subslices, special programming is
+ * needed in order to correctly enable all subslices.
+ *
+ * According to documentation software must consider the configuration
+ * as 2x4x8 and hardware will translate this to 1x8x8.
+ *
+ * Furthermore, even though SScount is three bits wide, the maximum
+ * documented value for it is four. From this, some rules/restrictions follow:
+ *
+ * 1.
+ * If enabled subslice count is greater than four, two whole slices must
+ * be enabled instead.
+ *
+ * 2.
+ * When more than one slice is enabled, hardware ignores the subslice
+ * count altogether.
+ *
+ * From these restrictions it follows that it is not possible to enable
+ * a subslice count between the SScount maximum of four and the maximum
+ * number available on a particular SKU. Either all subslices are
+ * enabled, or a count between one and four on the first slice.
+ */
+ if (IS_GEN(i915, 11) &&
+ slices == 1 &&
+ subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
+ GEM_BUG_ON(subslices & 1);
+
+ subslice_pg = false;
+ slices *= 2;
+ }
+
+ /*
+ * Starting in Gen9, render power gating can leave
+ * slice/subslice/EU in a partially enabled state. We
+ * must make an explicit request through RPCS for full
+ * enablement.
+ */
+ if (sseu->has_slice_pg) {
+ u32 mask, val = slices;
+
+ if (INTEL_GEN(i915) >= 11) {
+ mask = GEN11_RPCS_S_CNT_MASK;
+ val <<= GEN11_RPCS_S_CNT_SHIFT;
+ } else {
+ mask = GEN8_RPCS_S_CNT_MASK;
+ val <<= GEN8_RPCS_S_CNT_SHIFT;
+ }
+
+ GEM_BUG_ON(val & ~mask);
+ val &= mask;
+
+ rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val;
+ }
+
+ if (subslice_pg) {
+ u32 val = subslices;
+
+ val <<= GEN8_RPCS_SS_CNT_SHIFT;
+
+ GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK);
+ val &= GEN8_RPCS_SS_CNT_MASK;
+
+ rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
+ }
+
+ if (sseu->has_eu_pg) {
+ u32 val;
+
+ val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
+ GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
+ val &= GEN8_RPCS_EU_MIN_MASK;
+
+ rpcs |= val;
+
+ val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
+ GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
+ val &= GEN8_RPCS_EU_MAX_MASK;
+
+ rpcs |= val;
+
+ rpcs |= GEN8_RPCS_ENABLE;
+ }
+
+ return rpcs;
+}
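
The slice/subslice massaging described in the comment block above boils down to a few lines of arithmetic. Below is a minimal userspace model of just that ICL SScount rule, assuming an ICL-like topology with eight subslices per slice; model_rpcs() and its popcount use are illustrative, not the driver's API:

#include <stdint.h>
#include <stdio.h>

/* Model of the ICL rule above: SScount holds at most four subslices,
 * so a request for more than half the subslices on one slice is
 * re-expressed as two slices with the subslice count ignored
 * (hardware then treats 2x4x8 as 1x8x8).
 */
static void model_rpcs(uint8_t slice_mask, uint8_t subslice_mask,
		       unsigned int max_subslices)
{
	unsigned int slices = __builtin_popcount(slice_mask);
	unsigned int subslices = __builtin_popcount(subslice_mask);
	unsigned int limit = max_subslices / 2 < 4 ? max_subslices / 2 : 4;
	int subslice_pg = 1;

	if (slices == 1 && subslices > limit) {
		subslice_pg = 0; /* the count moves into the slice field */
		slices *= 2;
	}

	printf("request 1x%u -> program S_CNT=%u, SS_CNT %s\n",
	       subslices, slices, subslice_pg ? "used" : "ignored");
}

int main(void)
{
	model_rpcs(0x1, 0xff, 8); /* 1x8 -> 2 slices, SS_CNT ignored */
	model_rpcs(0x1, 0x0f, 8); /* 1x4 -> 1 slice, SS_CNT = 4      */
	return 0;
}
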
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
new file mode 100644
index 000000000000..73bc824094e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SSEU_H__
+#define __INTEL_SSEU_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+#define GEN_MAX_SLICES (6) /* CNL upper bound */
+#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
+
+struct sseu_dev_info {
+ u8 slice_mask;
+ u8 subslice_mask[GEN_MAX_SLICES];
+ u16 eu_total;
+ u8 eu_per_subslice;
+ u8 min_eu_in_pool;
+ /* For each slice, which subslices have 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
+
+ /* Topology fields */
+ u8 max_slices;
+ u8 max_subslices;
+ u8 max_eus_per_subslice;
+
+ /*
+ * We don't have more than 8 EUs per subslice at the moment and as we
+ * store EUs enabled using bits, no need to multiply by EUs per
+ * subslice.
+ */
+ u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
+};
+
+/*
+ * Powergating configuration for a particular (context,engine).
+ */
+struct intel_sseu {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 min_eus_per_subslice;
+ u8 max_eus_per_subslice;
+};
+
+static inline struct intel_sseu
+intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
+{
+ struct intel_sseu value = {
+ .slice_mask = sseu->slice_mask,
+ .subslice_mask = sseu->subslice_mask[0],
+ .min_eus_per_subslice = sseu->max_eus_per_subslice,
+ .max_eus_per_subslice = sseu->max_eus_per_subslice,
+ };
+
+ return value;
+}
+
+u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
+ const struct intel_sseu *req_sseu);
+
+#endif /* __INTEL_SSEU_H__ */
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 841b8e515f4d..88c195098bda 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -122,6 +122,7 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
wal->wa_count++;
wa_->val |= wa->val;
wa_->mask |= wa->mask;
+ wa_->read |= wa->read;
return;
}
}
@@ -146,9 +147,10 @@ wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
u32 val)
{
struct i915_wa wa = {
- .reg = reg,
+ .reg = reg,
.mask = mask,
- .val = val
+ .val = val,
+ .read = mask,
};
_wa_add(wal, &wa);
@@ -172,6 +174,19 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_write_masked_or(wal, reg, val, val);
}
+static void
+ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val,
+ /* Bonkers HW, skip verifying */
+ };
+
+ _wa_add(wal, &wa);
+}
+
#define WA_SET_BIT_MASKED(addr, mask) \
wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
@@ -181,10 +196,9 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
#define WA_SET_FIELD_MASKED(addr, mask, value) \
wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
-static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &engine->ctx_wa_list;
-
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -230,12 +244,12 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN6_WIZ_HASHING_16x4);
}
-static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
- gen8_ctx_workarounds_init(engine);
+ gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -258,11 +272,10 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
(IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
-static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &engine->ctx_wa_list;
-
- gen8_ctx_workarounds_init(engine);
+ gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -271,10 +284,10 @@ static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}
-static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
@@ -369,10 +382,10 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
-static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -409,17 +422,17 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
GEN9_IZ_HASHING(0, vals[0]));
}
-static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- gen9_ctx_workarounds_init(engine);
- skl_tune_iz_hashing(engine);
+ gen9_ctx_workarounds_init(engine, wal);
+ skl_tune_iz_hashing(engine, wal);
}
-static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &engine->ctx_wa_list;
-
- gen9_ctx_workarounds_init(engine);
+ gen9_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -430,12 +443,12 @@ static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
-static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
- gen9_ctx_workarounds_init(engine);
+ gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
@@ -447,22 +460,20 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
-static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &engine->ctx_wa_list;
-
- gen9_ctx_workarounds_init(engine);
+ gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
-static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &engine->ctx_wa_list;
-
- gen9_ctx_workarounds_init(engine);
+ gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:cfl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -473,10 +484,10 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
-static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
@@ -513,10 +524,16 @@ static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}
-static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ /* WaDisableBankHangMode:icl */
+ wa_write(wal,
+ GEN8_L3CNTLREG,
+ intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
+ GEN8_ERRDETBCTRL);
/* WaDisableBankHangMode:icl */
wa_write(wal,
@@ -562,33 +579,42 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
+
+ /* allow headerless messages for preemptible GPGPU context */
+ WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
+ GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
}
-void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
+static void
+__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal,
+ const char *name)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->ctx_wa_list;
- wa_init_start(wal, "context");
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ wa_init_start(wal, name);
if (IS_GEN(i915, 11))
- icl_ctx_workarounds_init(engine);
+ icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
- cnl_ctx_workarounds_init(engine);
+ cnl_ctx_workarounds_init(engine, wal);
else if (IS_COFFEELAKE(i915))
- cfl_ctx_workarounds_init(engine);
+ cfl_ctx_workarounds_init(engine, wal);
else if (IS_GEMINILAKE(i915))
- glk_ctx_workarounds_init(engine);
+ glk_ctx_workarounds_init(engine, wal);
else if (IS_KABYLAKE(i915))
- kbl_ctx_workarounds_init(engine);
+ kbl_ctx_workarounds_init(engine, wal);
else if (IS_BROXTON(i915))
- bxt_ctx_workarounds_init(engine);
+ bxt_ctx_workarounds_init(engine, wal);
else if (IS_SKYLAKE(i915))
- skl_ctx_workarounds_init(engine);
+ skl_ctx_workarounds_init(engine, wal);
else if (IS_CHERRYVIEW(i915))
- chv_ctx_workarounds_init(engine);
+ chv_ctx_workarounds_init(engine, wal);
else if (IS_BROADWELL(i915))
- bdw_ctx_workarounds_init(engine);
+ bdw_ctx_workarounds_init(engine, wal);
else if (INTEL_GEN(i915) < 8)
return;
else
@@ -597,6 +623,11 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
wa_init_finish(wal);
}
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
+{
+ __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
+}
+
int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
@@ -915,6 +946,21 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
return fw;
}
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
+{
+ if ((cur ^ wa->val) & wa->read) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ name, from, i915_mmio_reg_offset(wa->reg),
+ cur, cur & wa->read,
+ wa->val, wa->mask);
+
+ return false;
+ }
+
+ return true;
+}
+
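The verification predicate above is a single masked comparison: a workaround is reported lost when any bit covered by the read mask differs between what was programmed and what is read back, and an all-zero read mask (as left by ignore_wa_write_or()) can never fail. A standalone model with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Model of wa_verify(): compare cur against val only on the bits the
 * read mask declares trustworthy; read == 0 skips verification.
 */
static int model_wa_verify(uint32_t cur, uint32_t val, uint32_t read)
{
	return !((cur ^ val) & read);
}

int main(void)
{
	printf("%d\n", model_wa_verify(0x0f, 0x0f, 0x0f)); /* 1: intact */
	printf("%d\n", model_wa_verify(0x0e, 0x0f, 0x0f)); /* 0: lost */
	printf("%d\n", model_wa_verify(0xff, 0x0f, 0x00)); /* 1: not verified */
	return 0;
}
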
static void
wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
@@ -933,6 +979,10 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ wa_verify(wa,
+ intel_uncore_read_fw(uncore, wa->reg),
+ wal->name, "application");
}
intel_uncore_forcewake_put__locked(uncore, fw);
@@ -944,20 +994,6 @@ void intel_gt_apply_workarounds(struct drm_i915_private *i915)
wa_list_apply(&i915->uncore, &i915->gt_wa_list);
}
-static bool
-wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
-{
- if ((cur ^ wa->val) & wa->mask) {
- DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
- name, from, i915_mmio_reg_offset(wa->reg), cur,
- cur & wa->mask, wa->val, wa->mask);
-
- return false;
- }
-
- return true;
-}
-
static bool wa_list_verify(struct intel_uncore *uncore,
const struct i915_wa_list *wal,
const char *from)
@@ -1062,7 +1098,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist;
- GEM_BUG_ON(engine->id != RCS0);
+ if (engine->class != RENDER_CLASS)
+ return;
wa_init_start(w, "whitelist");
@@ -1123,9 +1160,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
/* WaPipelineFlushCoherentLines:icl */
- wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN8_LQSC_FLUSH_COHERENT_LINES);
+ ignore_wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
* Wa_1405543622:icl
@@ -1152,9 +1190,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1405733216:icl
* Formerly known as WaDisableCleanEvicts
*/
- wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ ignore_wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
/* WaForwardProgressSoftReset:icl */
wa_write_or(wal,
@@ -1260,6 +1299,128 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->uncore, &engine->wa_list);
}
+static struct i915_vma *
+create_scratch(struct i915_address_space *vm, int count)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int size;
+ int err;
+
+ size = round_up(count * sizeof(u32), PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int
+wa_list_srm(struct i915_request *rq,
+ const struct i915_wa_list *wal,
+ struct i915_vma *vma)
+{
+ const struct i915_wa *wa;
+ unsigned int i;
+ u32 srm, *cs;
+
+ srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ if (INTEL_GEN(rq->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * wal->count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
+ *cs++ = 0;
+ }
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
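Each workaround register costs exactly four dwords of command stream, and on Gen8+ the store opcode is incremented to select the 64-bit-address form of the command (the srm++ above). A hedged userspace model of the emitted stream; FAKE_SRM and FAKE_GLOBAL_GTT are invented placeholders, the real encodings live in the driver's register headers:

#include <stdint.h>
#include <stdio.h>

#define FAKE_SRM	0x10000000u /* hypothetical MI_STORE_REGISTER_MEM */
#define FAKE_GLOBAL_GTT	0x00400000u /* hypothetical global-GTT flag */

/* Model of wa_list_srm(): opcode, source register offset, destination
 * offset in the scratch buffer, upper address dword (zero here).
 */
static unsigned int emit_srm(uint32_t *cs, const uint32_t *regs,
			     unsigned int count, uint32_t base, int gen)
{
	uint32_t srm = FAKE_SRM | FAKE_GLOBAL_GTT;
	unsigned int i, n = 0;

	if (gen >= 8)
		srm++; /* longer form with a 64-bit address */

	for (i = 0; i < count; i++) {
		cs[n++] = srm;
		cs[n++] = regs[i];
		cs[n++] = base + sizeof(uint32_t) * i;
		cs[n++] = 0;
	}
	return n;
}

int main(void)
{
	uint32_t cs[8];
	const uint32_t regs[] = { 0xb118, 0x7304 }; /* example offsets */

	printf("emitted %u dwords\n", emit_srm(cs, regs, 2, 0x1000, 9));
	return 0;
}
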
+static int engine_wa_list_verify(struct intel_context *ce,
+ const struct i915_wa_list * const wal,
+ const char *from)
+{
+ const struct i915_wa *wa;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ unsigned int i;
+ u32 *results;
+ int err;
+
+ if (!wal->count)
+ return 0;
+
+ vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ err = wa_list_srm(rq, wal, vma);
+ if (err)
+ goto err_vma;
+
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_vma;
+ }
+
+ results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto err_vma;
+ }
+
+ err = 0;
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ if (!wa_verify(wa, results[i], wal->name, from))
+ err = -ENXIO;
+
+ i915_gem_object_unpin_map(vma->obj);
+
+err_vma:
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+ return err;
+}
+
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from)
+{
+ return engine_wa_list_verify(engine->kernel_context,
+ &engine->wa_list,
+ from);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_workarounds.c"
+#include "selftest_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
index 34eee5ec511e..3761a6ee58bb 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -4,13 +4,17 @@
* Copyright © 2014-2018 Intel Corporation
*/
-#ifndef _I915_WORKAROUNDS_H_
-#define _I915_WORKAROUNDS_H_
+#ifndef _INTEL_WORKAROUNDS_H_
+#define _INTEL_WORKAROUNDS_H_
#include <linux/slab.h>
#include "intel_workarounds_types.h"
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
kfree(wal->list);
@@ -30,5 +34,7 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
void intel_engine_init_workarounds(struct intel_engine_cs *engine);
void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from);
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index 30918da180ff..42ac1fb99572 100644
--- a/drivers/gpu/drm/i915/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -12,9 +12,10 @@
#include "i915_reg.h"
struct i915_wa {
- i915_reg_t reg;
- u32 mask;
- u32 val;
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+ u32 read;
};
struct i915_wa_list {
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 61a8206ed677..2941916b37bf 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -22,8 +22,13 @@
*
*/
+#include "i915_drv.h"
+#include "i915_gem_context.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+
#include "mock_engine.h"
-#include "mock_request.h"
+#include "selftests/mock_request.h"
struct mock_ring {
struct intel_ring base;
@@ -154,6 +159,9 @@ static const struct intel_context_ops mock_context_ops = {
.pin = mock_context_pin,
.unpin = mock_context_unpin,
+ .enter = intel_context_enter_engine,
+ .exit = intel_context_exit_engine,
+
.destroy = mock_context_destroy,
};
@@ -257,29 +265,44 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.reset.finish = mock_reset_finish;
engine->base.cancel_requests = mock_cancel_requests;
- if (i915_timeline_init(i915, &engine->base.timeline, NULL))
- goto err_free;
- i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
-
- intel_engine_init_breadcrumbs(&engine->base);
-
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
- if (pin_context(i915->kernel_context, &engine->base,
- &engine->base.kernel_context))
+ return &engine->base;
+}
+
+int mock_engine_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ int err;
+
+ intel_engine_init_breadcrumbs(engine);
+ intel_engine_init_execlists(engine);
+ intel_engine_init__pm(engine);
+
+ if (i915_timeline_init(i915, &engine->timeline, NULL))
goto err_breadcrumbs;
+ i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
- return &engine->base;
+ engine->kernel_context =
+ i915_gem_context_get_engine(i915->kernel_context, engine->id);
+ if (IS_ERR(engine->kernel_context))
+ goto err_timeline;
+
+ err = intel_context_pin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
+ if (err)
+ goto err_timeline;
+
+ return 0;
+err_timeline:
+ i915_timeline_fini(&engine->timeline);
err_breadcrumbs:
- intel_engine_fini_breadcrumbs(&engine->base);
- i915_timeline_fini(&engine->base.timeline);
-err_free:
- kfree(engine);
- return NULL;
+ intel_engine_fini_breadcrumbs(engine);
+ return -ENOMEM;
}
void mock_engine_flush(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h b/drivers/gpu/drm/i915/gt/mock_engine.h
index b9cc3a245f16..3f9b698c49d2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.h
+++ b/drivers/gpu/drm/i915/gt/mock_engine.h
@@ -29,7 +29,7 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
-#include "../intel_ringbuffer.h"
+#include "gt/intel_engine.h"
struct mock_engine {
struct intel_engine_cs base;
@@ -42,6 +42,8 @@ struct mock_engine {
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id);
+int mock_engine_init(struct intel_engine_cs *engine);
+
void mock_engine_flush(struct intel_engine_cs *engine);
void mock_engine_reset(struct intel_engine_cs *engine);
void mock_engine_free(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index cfaa6b296835..cfaa6b296835 100644
--- a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 050bd1e19e02..48a51739b926 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -24,14 +24,18 @@
#include <linux/kthread.h>
-#include "../i915_selftest.h"
-#include "i915_random.h"
-#include "igt_flush_test.h"
-#include "igt_reset.h"
-#include "igt_wedge_me.h"
+#include "intel_engine_pm.h"
-#include "mock_context.h"
-#include "mock_drm.h"
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_gem_utils.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_wedge_me.h"
+#include "selftests/igt_atomic.h"
+
+#include "selftests/mock_context.h"
+#include "selftests/mock_drm.h"
#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
@@ -173,7 +177,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
if (err)
goto unpin_vma;
- rq = i915_request_alloc(engine, h->ctx);
+ rq = igt_request_alloc(h->ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin_hws;
@@ -362,54 +366,6 @@ unlock:
return err;
}
-static int igt_global_reset(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- unsigned int reset_count;
- int err = 0;
-
- /* Check that we can issue a global GPU reset */
-
- igt_global_reset_lock(i915);
-
- reset_count = i915_reset_count(&i915->gpu_error);
-
- i915_reset(i915, ALL_ENGINES, NULL);
-
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
- pr_err("No GPU reset recorded!\n");
- err = -EINVAL;
- }
-
- igt_global_reset_unlock(i915);
-
- if (i915_reset_failed(i915))
- err = -EIO;
-
- return err;
-}
-
-static int igt_wedged_reset(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- intel_wakeref_t wakeref;
-
- /* Check that we can recover a wedged device with a GPU reset */
-
- igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(i915);
-
- i915_gem_set_wedged(i915);
-
- GEM_BUG_ON(!i915_reset_failed(i915));
- i915_reset(i915, ALL_ENGINES, NULL);
-
- intel_runtime_pm_put(i915, wakeref);
- igt_global_reset_unlock(i915);
-
- return i915_reset_failed(i915) ? -EIO : 0;
-}
-
static bool wait_for_idle(struct intel_engine_cs *engine)
{
return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
@@ -453,7 +409,7 @@ static int igt_reset_nop(void *arg)
for (i = 0; i < 16; i++) {
struct i915_request *rq;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -479,19 +435,6 @@ static int igt_reset_nop(void *arg)
break;
}
- if (!i915_reset_flush(i915)) {
- struct drm_printer p =
- drm_info_printer(i915->drm.dev);
-
- pr_err("%s failed to idle after reset\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- err = -EIO;
- break;
- }
-
err = igt_flush_test(i915, 0);
if (err)
break;
@@ -565,7 +508,7 @@ static int igt_reset_nop_engine(void *arg)
for (i = 0; i < 16; i++) {
struct i915_request *rq;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -594,19 +537,6 @@ static int igt_reset_nop_engine(void *arg)
err = -EINVAL;
break;
}
-
- if (!i915_reset_flush(i915)) {
- struct drm_printer p =
- drm_info_printer(i915->drm.dev);
-
- pr_err("%s failed to idle after reset\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- err = -EIO;
- break;
- }
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
@@ -669,6 +599,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
engine);
+ intel_engine_pm_get(engine);
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
if (active) {
@@ -721,21 +652,9 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
err = -EINVAL;
break;
}
-
- if (!i915_reset_flush(i915)) {
- struct drm_printer p =
- drm_info_printer(i915->drm.dev);
-
- pr_err("%s failed to idle after reset\n",
- engine->name);
- intel_engine_dump(engine, &p,
- "%s\n", engine->name);
-
- err = -EIO;
- break;
- }
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ intel_engine_pm_put(engine);
if (err)
break;
@@ -835,7 +754,7 @@ static int active_engine(void *data)
struct i915_request *new;
mutex_lock(&engine->i915->drm.struct_mutex);
- new = i915_request_alloc(engine, ctx[idx]);
+ new = igt_request_alloc(ctx[idx], engine);
if (IS_ERR(new)) {
mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
@@ -942,6 +861,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
get_task_struct(tsk);
}
+ intel_engine_pm_get(engine);
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
struct i915_request *rq = NULL;
@@ -1018,6 +938,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ intel_engine_pm_put(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
@@ -1069,7 +990,9 @@ unwind:
if (err)
break;
- err = igt_flush_test(i915, 0);
+ mutex_lock(&i915->drm.struct_mutex);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
if (err)
break;
}
@@ -1681,44 +1604,8 @@ err_unlock:
return err;
}
-static void __preempt_begin(void)
-{
- preempt_disable();
-}
-
-static void __preempt_end(void)
-{
- preempt_enable();
-}
-
-static void __softirq_begin(void)
-{
- local_bh_disable();
-}
-
-static void __softirq_end(void)
-{
- local_bh_enable();
-}
-
-static void __hardirq_begin(void)
-{
- local_irq_disable();
-}
-
-static void __hardirq_end(void)
-{
- local_irq_enable();
-}
-
-struct atomic_section {
- const char *name;
- void (*critical_section_begin)(void);
- void (*critical_section_end)(void);
-};
-
static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
- const struct atomic_section *p,
+ const struct igt_atomic_section *p,
const char *mode)
{
struct tasklet_struct * const t = &engine->execlists.tasklet;
@@ -1743,7 +1630,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
}
static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
- const struct atomic_section *p)
+ const struct igt_atomic_section *p)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_request *rq;
@@ -1794,79 +1681,43 @@ out:
return err;
}
-static void force_reset(struct drm_i915_private *i915)
-{
- i915_gem_set_wedged(i915);
- i915_reset(i915, 0, NULL);
-}
-
-static int igt_atomic_reset(void *arg)
+static int igt_reset_engines_atomic(void *arg)
{
- static const struct atomic_section phases[] = {
- { "preempt", __preempt_begin, __preempt_end },
- { "softirq", __softirq_begin, __softirq_end },
- { "hardirq", __hardirq_begin, __hardirq_end },
- { }
- };
struct drm_i915_private *i915 = arg;
- intel_wakeref_t wakeref;
+ const typeof(*igt_atomic_phases) *p;
int err = 0;
- /* Check that the resets are usable from atomic context */
+ /* Check that the engine resets are usable from atomic context */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
if (USES_GUC_SUBMISSION(i915))
- return 0; /* guc is dead; long live the guc */
+ return 0;
igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
/* Flush any requests before we get started and check basics */
- force_reset(i915);
- if (i915_reset_failed(i915))
+ if (!igt_force_reset(i915))
goto unlock;
- if (intel_has_gpu_reset(i915)) {
- const typeof(*phases) *p;
-
- for (p = phases; p->name; p++) {
- GEM_TRACE("intel_gpu_reset under %s\n", p->name);
-
- p->critical_section_begin();
- err = intel_gpu_reset(i915, ALL_ENGINES);
- p->critical_section_end();
-
- if (err) {
- pr_err("intel_gpu_reset failed under %s\n",
- p->name);
- goto out;
- }
- }
-
- force_reset(i915);
- }
-
- if (intel_has_reset_engine(i915)) {
+ for (p = igt_atomic_phases; p->name; p++) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
- const typeof(*phases) *p;
-
- for (p = phases; p->name; p++) {
- err = igt_atomic_reset_engine(engine, p);
- if (err)
- goto out;
- }
+ err = igt_atomic_reset_engine(engine, p);
+ if (err)
+ goto out;
}
}
out:
/* As we poke around the guts, do a full reset before continuing. */
- force_reset(i915);
+ igt_force_reset(i915);
unlock:
- intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
@@ -1876,21 +1727,19 @@ unlock:
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
- SUBTEST(igt_global_reset), /* attempt to recover GPU first */
- SUBTEST(igt_wedged_reset),
SUBTEST(igt_hang_sanitycheck),
SUBTEST(igt_reset_nop),
SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
SUBTEST(igt_reset_engines),
+ SUBTEST(igt_reset_engines_atomic),
SUBTEST(igt_reset_queue),
SUBTEST(igt_reset_wait),
SUBTEST(igt_reset_evict_ggtt),
SUBTEST(igt_reset_evict_ppgtt),
SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
- SUBTEST(igt_atomic_reset),
};
intel_wakeref_t wakeref;
bool saved_hangcheck;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index e8b0b5dbcb2c..a8c50900e2d4 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -6,15 +6,15 @@
#include <linux/prime_numbers.h>
-#include "../i915_reset.h"
-
-#include "../i915_selftest.h"
-#include "igt_flush_test.h"
-#include "igt_live_test.h"
-#include "igt_spinner.h"
-#include "i915_random.h"
-
-#include "mock_context.h"
+#include "gt/intel_reset.h"
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_gem_utils.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/lib_sw_fence.h"
+#include "selftests/mock_context.h"
static int live_sanitycheck(void *arg)
{
@@ -152,7 +152,7 @@ static int live_busywait_preempt(void *arg)
* fails, we hang instead.
*/
- lo = i915_request_alloc(engine, ctx_lo);
+ lo = igt_request_alloc(ctx_lo, engine);
if (IS_ERR(lo)) {
err = PTR_ERR(lo);
goto err_vma;
@@ -196,7 +196,7 @@ static int live_busywait_preempt(void *arg)
goto err_vma;
}
- hi = i915_request_alloc(engine, ctx_hi);
+ hi = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(hi)) {
err = PTR_ERR(hi);
goto err_vma;
@@ -641,14 +641,19 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
GEM_BUG_ON(i915_request_completed(rq));
i915_sw_fence_init(&rq->submit, dummy_notify);
- i915_sw_fence_commit(&rq->submit);
+ set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
return rq;
}
static void dummy_request_free(struct i915_request *dummy)
{
+ /* We have to fake the CS interrupt to kick the next request */
+ i915_sw_fence_commit(&dummy->submit);
+
i915_request_mark_complete(dummy);
+ dma_fence_signal(&dummy->fence);
+
i915_sched_node_fini(&dummy->sched);
i915_sw_fence_fini(&dummy->submit);
@@ -861,13 +866,13 @@ static int live_chain_preempt(void *arg)
i915_request_add(rq);
for (i = 0; i < count; i++) {
- rq = i915_request_alloc(engine, lo.ctx);
+ rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
}
- rq = i915_request_alloc(engine, hi.ctx);
+ rq = igt_request_alloc(hi.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
@@ -886,7 +891,7 @@ static int live_chain_preempt(void *arg)
}
igt_spinner_end(&lo.spin);
- rq = i915_request_alloc(engine, lo.ctx);
+ rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
@@ -1093,7 +1098,7 @@ static int smoke_submit(struct preempt_smoke *smoke,
ctx->sched.priority = prio;
- rq = i915_request_alloc(smoke->engine, ctx);
+ rq = igt_request_alloc(ctx, smoke->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin;
@@ -1306,6 +1311,504 @@ err_unlock:
return err;
}
+static int nop_virtual_engine(struct drm_i915_private *i915,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int nctx,
+ unsigned int flags)
+#define CHAIN BIT(0)
+{
+ IGT_TIMEOUT(end_time);
+ struct i915_request *request[16];
+ struct i915_gem_context *ctx[16];
+ struct intel_context *ve[16];
+ unsigned long n, prime, nc;
+ struct igt_live_test t;
+ ktime_t times[2] = {};
+ int err;
+
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
+
+ for (n = 0; n < nctx; n++) {
+ ctx[n] = kernel_context(i915);
+ if (!ctx[n]) {
+ err = -ENOMEM;
+ nctx = n;
+ goto out;
+ }
+
+ ve[n] = intel_execlists_create_virtual(ctx[n],
+ siblings, nsibling);
+ if (IS_ERR(ve[n])) {
+ kernel_context_close(ctx[n]);
+ err = PTR_ERR(ve[n]);
+ nctx = n;
+ goto out;
+ }
+
+ err = intel_context_pin(ve[n]);
+ if (err) {
+ intel_context_put(ve[n]);
+ kernel_context_close(ctx[n]);
+ nctx = n;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
+ if (err)
+ goto out;
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
+ if (flags & CHAIN) {
+ for (nc = 0; nc < nctx; nc++) {
+ for (n = 0; n < prime; n++) {
+ request[nc] =
+ i915_request_create(ve[nc]);
+ if (IS_ERR(request[nc])) {
+ err = PTR_ERR(request[nc]);
+ goto out;
+ }
+
+ i915_request_add(request[nc]);
+ }
+ }
+ } else {
+ for (n = 0; n < prime; n++) {
+ for (nc = 0; nc < nctx; nc++) {
+ request[nc] =
+ i915_request_create(ve[nc]);
+ if (IS_ERR(request[nc])) {
+ err = PTR_ERR(request[nc]);
+ goto out;
+ }
+
+ i915_request_add(request[nc]);
+ }
+ }
+ }
+
+ for (nc = 0; nc < nctx; nc++) {
+ if (i915_request_wait(request[nc],
+ I915_WAIT_LOCKED,
+ HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve[0]->engine->name,
+ request[nc]->fence.context,
+ request[nc]->fence.seqno);
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ break;
+ }
+ }
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = igt_live_test_end(&t);
+ if (err)
+ goto out;
+
+ pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (nc = 0; nc < nctx; nc++) {
+ intel_context_unpin(ve[nc]);
+ intel_context_put(ve[nc]);
+ kernel_context_close(ctx[nc]);
+ }
+ return err;
+}
+
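nop_virtual_engine() walks the batch size through the primes up to 8192, timing each batch so the single-request baseline can be compared with the amortised per-request latency at depth. A minimal sketch of that measurement loop, with clock_gettime() standing in for ktime and a stubbed submit; the helper names are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void submit_nop(void) { /* stand-in for i915_request_create/add */ }

int main(void)
{
	/* a few primes stand in for for_each_prime_number_from(prime, 1, 8192) */
	const unsigned int primes[] = { 1, 2, 3, 5, 8191 };
	uint64_t baseline = 0;
	unsigned int i, n;

	for (i = 0; i < sizeof(primes) / sizeof(primes[0]); i++) {
		uint64_t t = now_ns();

		for (n = 0; n < primes[i]; n++)
			submit_nop();

		t = now_ns() - t;
		if (primes[i] == 1)
			baseline = t; /* cost of a single request */

		printf("1 = %lluns, %u = %lluns per request\n",
		       (unsigned long long)baseline, primes[i],
		       (unsigned long long)(t / primes[i]));
	}
	return 0;
}
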
+static int live_virtual_engine(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int class, inst;
+ int err = -ENODEV;
+
+ if (USES_GUC_SUBMISSION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ for_each_engine(engine, i915, id) {
+ err = nop_virtual_engine(i915, &engine, 1, 1, 0);
+ if (err) {
+ pr_err("Failed to wrap engine %s: err=%d\n",
+ engine->name, err);
+ goto out_unlock;
+ }
+ }
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, n;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!i915->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = i915->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ for (n = 1; n <= nsibling + 1; n++) {
+ err = nop_virtual_engine(i915, siblings, nsibling,
+ n, 0);
+ if (err)
+ goto out_unlock;
+ }
+
+ err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
+ if (err)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int mask_virtual_engine(struct drm_i915_private *i915,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
+ struct i915_gem_context *ctx;
+ struct intel_context *ve;
+ struct igt_live_test t;
+ unsigned int n;
+ int err;
+
+ /*
+ * Check that by setting the execution mask on a request, we can
+ * restrict it to our desired engine within the virtual engine.
+ */
+
+ ctx = kernel_context(i915);
+ if (!ctx)
+ return -ENOMEM;
+
+ ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_close;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < nsibling; n++) {
+ request[n] = i915_request_create(ve);
+ if (IS_ERR(request[n])) {
+ err = PTR_ERR(request[n]);
+ nsibling = n;
+ goto out;
+ }
+
+ /* Reverse order as it's more likely to be unnatural */
+ request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+
+ i915_request_get(request[n]);
+ i915_request_add(request[n]);
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(request[n], I915_WAIT_LOCKED, HZ / 10) < 0) {
+ pr_err("%s(%s): wait for %llx:%lld timed out\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+
+ GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+ __func__, ve->engine->name,
+ request[n]->fence.context,
+ request[n]->fence.seqno);
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto out;
+ }
+
+ if (request[n]->engine != siblings[nsibling - n - 1]) {
+ pr_err("Executed on wrong sibling '%s', expected '%s'\n",
+ request[n]->engine->name,
+ siblings[nsibling - n - 1]->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ err = igt_live_test_end(&t);
+ if (err)
+ goto out;
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (n = 0; n < nsibling; n++)
+ i915_request_put(request[n]);
+
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_close:
+ kernel_context_close(ctx);
+ return err;
+}
+
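The reverse indexing above - request n is pinned to sibling nsibling - n - 1 - deliberately submits in an order the load balancer would be unlikely to pick on its own, so a pass proves the execution mask took effect rather than luck. The index arithmetic in isolation:

#include <stdio.h>

int main(void)
{
	const char *siblings[] = { "vcs0", "vcs1", "vcs2" };
	unsigned int nsibling = 3, n;

	/* request n may only run on sibling nsibling - n - 1 */
	for (n = 0; n < nsibling; n++)
		printf("request %u -> %s\n", n, siblings[nsibling - n - 1]);
	return 0;
}
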
+static int live_virtual_mask(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+ int err = 0;
+
+ if (USES_GUC_SUBMISSION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ unsigned int nsibling;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!i915->engine_class[class][inst])
+ break;
+
+ siblings[nsibling++] = i915->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = mask_virtual_engine(i915, siblings, nsibling);
+ if (err)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int bond_virtual_engine(struct drm_i915_private *i915,
+ unsigned int class,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling,
+ unsigned int flags)
+#define BOND_SCHEDULE BIT(0)
+{
+ struct intel_engine_cs *master;
+ struct i915_gem_context *ctx;
+ struct i915_request *rq[16];
+ enum intel_engine_id id;
+ unsigned long n;
+ int err;
+
+ GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
+
+ ctx = kernel_context(i915);
+ if (!ctx)
+ return -ENOMEM;
+
+ err = 0;
+ rq[0] = ERR_PTR(-ENOMEM);
+ for_each_engine(master, i915, id) {
+ struct i915_sw_fence fence = {};
+
+ if (master->class == class)
+ continue;
+
+ memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
+
+ rq[0] = igt_request_alloc(ctx, master);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto out;
+ }
+ i915_request_get(rq[0]);
+
+ if (flags & BOND_SCHEDULE) {
+ onstack_fence_init(&fence);
+ err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
+ &fence,
+ GFP_KERNEL);
+ }
+ i915_request_add(rq[0]);
+ if (err < 0)
+ goto out;
+
+ for (n = 0; n < nsibling; n++) {
+ struct intel_context *ve;
+
+ ve = intel_execlists_create_virtual(ctx,
+ siblings,
+ nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_virtual_engine_attach_bond(ve->engine,
+ master,
+ siblings[n]);
+ if (err) {
+ intel_context_put(ve);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ err = intel_context_pin(ve);
+ intel_context_put(ve);
+ if (err) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+
+ rq[n + 1] = i915_request_create(ve);
+ intel_context_unpin(ve);
+ if (IS_ERR(rq[n + 1])) {
+ err = PTR_ERR(rq[n + 1]);
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ i915_request_get(rq[n + 1]);
+
+ err = i915_request_await_execution(rq[n + 1],
+ &rq[0]->fence,
+ ve->engine->bond_execute);
+ i915_request_add(rq[n + 1]);
+ if (err < 0) {
+ onstack_fence_fini(&fence);
+ goto out;
+ }
+ }
+ onstack_fence_fini(&fence);
+
+ if (i915_request_wait(rq[0],
+ I915_WAIT_LOCKED,
+ HZ / 10) < 0) {
+ pr_err("Master request did not execute (on %s)!\n",
+ rq[0]->engine->name);
+ err = -EIO;
+ goto out;
+ }
+
+ for (n = 0; n < nsibling; n++) {
+ if (i915_request_wait(rq[n + 1],
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[n + 1]->engine != siblings[n]) {
+ pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
+ siblings[n]->name,
+ rq[n + 1]->engine->name,
+ rq[0]->engine->name);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ rq[0] = ERR_PTR(-ENOMEM);
+ }
+
+out:
+ for (n = 0; !IS_ERR(rq[n]); n++)
+ i915_request_put(rq[n]);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ kernel_context_close(ctx);
+ return err;
+}
+
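bond_virtual_engine() keeps the master request in rq[0] and each bonded request in rq[n + 1], pre-filling the array with memset_p() and ERR_PTR(-EINVAL) so the cleanup loops can simply walk until the first error-pointer sentinel. A standalone model of that sentinel idiom, with ERR_PTR/IS_ERR re-implemented only for the sketch:

#include <stdio.h>

/* minimal stand-ins for the kernel's ERR_PTR machinery */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)

struct request { int id; };

int main(void)
{
	struct request reqs[2] = { { 0 }, { 1 } };
	void *rq[4];
	int n;

	/* pre-fill with sentinels, as memset_p(ERR_PTR(-EINVAL)) does */
	for (n = 0; n < 4; n++)
		rq[n] = ERR_PTR(-22);

	/* only the master and one bonded request were created */
	rq[0] = &reqs[0];
	rq[1] = &reqs[1];

	/* cleanup walks to the first error pointer, freeing only what exists */
	for (n = 0; !IS_ERR(rq[n]); n++)
		printf("put request %d\n", ((struct request *)rq[n])->id);

	return 0;
}
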
+static int live_virtual_bond(void *arg)
+{
+ static const struct phase {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "", 0 },
+ { "schedule", BOND_SCHEDULE },
+ { },
+ };
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+ int err = 0;
+
+ if (USES_GUC_SUBMISSION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ const struct phase *p;
+ int nsibling;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!i915->engine_class[class][inst])
+ break;
+
+ GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
+ siblings[nsibling++] = i915->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ for (p = phases; p->name; p++) {
+ err = bond_virtual_engine(i915,
+ class, siblings, nsibling,
+ p->flags);
+ if (err) {
+ pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
+ __func__, p->name, class, nsibling, err);
+ goto out_unlock;
+ }
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -1318,6 +1821,9 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_smoke),
+ SUBTEST(live_virtual_engine),
+ SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_bond),
};
if (!HAS_EXECLISTS(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
new file mode 100644
index 000000000000..607473439eb0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_atomic.h"
+
+static int igt_global_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ unsigned int reset_count;
+ int err = 0;
+
+ /* Check that we can issue a global GPU reset */
+
+ igt_global_reset_lock(i915);
+
+ reset_count = i915_reset_count(&i915->gpu_error);
+
+ i915_reset(i915, ALL_ENGINES, NULL);
+
+ if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ err = -EINVAL;
+ }
+
+ igt_global_reset_unlock(i915);
+
+ if (i915_reset_failed(i915))
+ err = -EIO;
+
+ return err;
+}
+
+static int igt_wedged_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ intel_wakeref_t wakeref;
+
+ /* Check that we can recover a wedged device with a GPU reset */
+
+ igt_global_reset_lock(i915);
+ wakeref = intel_runtime_pm_get(i915);
+
+ i915_gem_set_wedged(i915);
+
+ GEM_BUG_ON(!i915_reset_failed(i915));
+ i915_reset(i915, ALL_ENGINES, NULL);
+
+ intel_runtime_pm_put(i915, wakeref);
+ igt_global_reset_unlock(i915);
+
+ return i915_reset_failed(i915) ? -EIO : 0;
+}
+
+static int igt_atomic_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const typeof(*igt_atomic_phases) *p;
+ int err = 0;
+
+ /* Check that the resets are usable from atomic context */
+
+ igt_global_reset_lock(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ /* Flush any requests before we get started and check basics */
+ if (!igt_force_reset(i915))
+ goto unlock;
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+
+ p->critical_section_begin();
+ reset_prepare(i915);
+ err = intel_gpu_reset(i915, ALL_ENGINES);
+ reset_finish(i915);
+ p->critical_section_end();
+
+ if (err) {
+ pr_err("intel_gpu_reset failed under %s\n", p->name);
+ break;
+ }
+ }
+
+ /* As we poke around the guts, do a full reset before continuing. */
+ igt_force_reset(i915);
+
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ igt_global_reset_unlock(i915);
+
+ return err;
+}
+
+int intel_reset_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_global_reset), /* attempt to recover GPU first */
+ SUBTEST(igt_wedged_reset),
+ SUBTEST(igt_atomic_reset),
+ };
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ if (!intel_has_gpu_reset(i915))
+ return 0;
+
+ if (i915_terminally_wedged(i915))
+ return -EIO; /* we're long past hope of a successful reset */
+
+ with_intel_runtime_pm(i915, wakeref)
+ err = i915_subtests(tests, i915);
+
+ return err;
+}
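
The phases that igt_atomic_reset() iterates were hoisted out of selftest_hangcheck.c into a shared selftests/igt_atomic.h; a sketch of what that table presumably contains, reconstructed from the begin/end helpers deleted from the hangcheck test above (the header's exact contents are an assumption):

#include <linux/preempt.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>

/* Reconstruction of the shared table; mirrors the helpers removed
 * from selftest_hangcheck.c rather than quoting igt_atomic.h.
 */
struct igt_atomic_section {
	const char *name;
	void (*critical_section_begin)(void);
	void (*critical_section_end)(void);
};

static void __preempt_begin(void) { preempt_disable(); }
static void __preempt_end(void) { preempt_enable(); }
static void __softirq_begin(void) { local_bh_disable(); }
static void __softirq_end(void) { local_bh_enable(); }
static void __hardirq_begin(void) { local_irq_disable(); }
static void __hardirq_end(void) { local_irq_enable(); }

static const struct igt_atomic_section igt_atomic_phases[] = {
	{ "preempt", __preempt_begin, __preempt_end },
	{ "softirq", __softirq_begin, __softirq_end },
	{ "hardirq", __hardirq_begin, __hardirq_end },
	{ }
};
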
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 567b6f8dae86..f9c9e7291187 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -4,15 +4,16 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_selftest.h"
-#include "../i915_reset.h"
+#include "i915_selftest.h"
+#include "intel_reset.h"
-#include "igt_flush_test.h"
-#include "igt_reset.h"
-#include "igt_spinner.h"
-#include "igt_wedge_me.h"
-#include "mock_context.h"
-#include "mock_drm.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_gem_utils.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_wedge_me.h"
+#include "selftests/mock_context.h"
+#include "selftests/mock_drm.h"
static const struct wo_register {
enum intel_platform platform;
@@ -21,12 +22,13 @@ static const struct wo_register {
{ INTEL_GEMINILAKE, 0x731c }
};
-#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
+#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
struct wa_lists {
struct i915_wa_list gt_wa_list;
struct {
char name[REF_NAME_MAX];
struct i915_wa_list wa_list;
+ struct i915_wa_list ctx_wa_list;
} engine[I915_NUM_ENGINES];
};
@@ -51,6 +53,12 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
wa_init_start(wal, name);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
+
+ snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);
+
+ __intel_engine_init_ctx_wa(engine,
+ &lists->engine[id].ctx_wa_list,
+ name);
}
}
@@ -71,7 +79,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
const u32 base = engine->mmio_base;
struct drm_i915_gem_object *result;
- intel_wakeref_t wakeref;
struct i915_request *rq;
struct i915_vma *vma;
u32 srm, *cs;
@@ -103,9 +110,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
- rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(engine->i915, wakeref)
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
@@ -340,49 +345,6 @@ out:
return err;
}
-static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- void *ptr;
- int err;
-
- obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
-
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
- goto err_obj;
- }
- memset(ptr, 0xc5, PAGE_SIZE);
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err_obj;
-
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- if (err)
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
struct drm_i915_gem_object *obj;
@@ -475,7 +437,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
int err = 0, i, v;
u32 *cs, *results;
- scratch = create_scratch(ctx);
+ scratch = create_scratch(&ctx->ppgtt->vm, 2 * ARRAY_SIZE(values) + 1);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -557,7 +519,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
i915_gem_object_unpin_map(batch->obj);
i915_gem_chipset_flush(ctx->i915);
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
@@ -743,26 +705,343 @@ out:
return err;
}
-static bool verify_gt_engine_wa(struct drm_i915_private *i915,
- struct wa_lists *lists, const char *str)
+static int read_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct i915_vma *results)
+{
+ struct i915_request *rq;
+ int i, err = 0;
+ u32 srm, *cs;
+
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ srm = MI_STORE_REGISTER_MEM;
+ if (INTEL_GEN(ctx->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_req;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u64 offset = results->node.start + sizeof(u32) * i;
+
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ }
+ intel_ring_advance(rq, cs);
+
+err_req:
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ err = -EIO;
+
+ return err;
+}
+
+static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ int i, err = 0;
+ u32 *cs;
+
+ batch = create_batch(ctx);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
+ for (i = 0; i < engine->whitelist.count; i++) {
+ *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = 0xffffffff;
+ }
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_chipset_flush(ctx->i915);
+
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ /* Perform the writes from an unprivileged "user" batch */
+ err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+
+err_request:
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ err = -EIO;
+
+err_unpin:
+ i915_gem_object_unpin_map(batch->obj);
+err_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+ return err;
+}
+
+struct regmask {
+ i915_reg_t reg;
+ unsigned long gen_mask;
+};
+
+static bool find_reg(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct regmask *tbl,
+ unsigned long count)
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+
+ while (count--) {
+ if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
+ i915_mmio_reg_offset(tbl->reg) == offset)
+ return true;
+ tbl++;
+ }
+
+ return false;
+}
+
+static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ /* Alas, we must pardon some whitelists. Mistakes already made */
+ static const struct regmask pardon[] = {
+ { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
+ { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
+ };
+
+ return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
+}
+
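+/*
+ * result_eq() expects both snapshots to match, pardoning registers that we
+ * know are not context saved; result_neq() expects them to differ, excusing
+ * registers whose writes cannot be read back.
+ */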
+static bool result_eq(struct intel_engine_cs *engine,
+ u32 a, u32 b, i915_reg_t reg)
+{
+ if (a != b && !pardon_reg(engine->i915, reg)) {
+ pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
+ i915_mmio_reg_offset(reg), a, b);
+ return false;
+ }
+
+ return true;
+}
+
+static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+	/* Some registers do not seem to behave, and our writes are unreadable */
+ static const struct regmask wo[] = {
+ { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
+ };
+
+ return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
+}
+
+static bool result_neq(struct intel_engine_cs *engine,
+ u32 a, u32 b, i915_reg_t reg)
+{
+ if (a == b && !writeonly_reg(engine->i915, reg)) {
+ pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
+ i915_mmio_reg_offset(reg), a);
+ return false;
+ }
+
+ return true;
+}
+
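+/*
+ * Compare two whitelist snapshots element-wise with the given predicate,
+ * reporting -EINVAL if any register fails the check.
+ */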
+static int
+check_whitelisted_registers(struct intel_engine_cs *engine,
+ struct i915_vma *A,
+ struct i915_vma *B,
+ bool (*fn)(struct intel_engine_cs *engine,
+ u32 a, u32 b,
+ i915_reg_t reg))
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
+ err = -EINVAL;
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static int live_isolated_whitelist(void *arg)
{
+ struct drm_i915_private *i915 = arg;
+ struct {
+ struct i915_gem_context *ctx;
+ struct i915_vma *scratch[2];
+ } client[2] = {};
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ int i, err = 0;
+
+ /*
+	 * Check that a write into a whitelist register works, but is
+	 * invisible to a second context.
+ */
+
+ if (!intel_engines_has_context_isolation(i915))
+ return 0;
+
+ if (!i915->kernel_context->ppgtt)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_gem_context *c;
+
+ c = kernel_context(i915);
+ if (IS_ERR(c)) {
+ err = PTR_ERR(c);
+ goto err;
+ }
+
+ client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024);
+ if (IS_ERR(client[i].scratch[0])) {
+ err = PTR_ERR(client[i].scratch[0]);
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024);
+ if (IS_ERR(client[i].scratch[1])) {
+ err = PTR_ERR(client[i].scratch[1]);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].ctx = c;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (!engine->whitelist.count)
+ continue;
+
+ /* Read default values */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Try to overwrite registers (should only affect ctx0) */
+ err = scrub_whitelisted_registers(client[0].ctx, engine);
+ if (err)
+ goto err;
+
+		/* Read values from ctx1; we expect these to be the defaults */
+ err = read_whitelisted_registers(client[1].ctx, engine,
+ client[1].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Verify that both reads return the same default values */
+ err = check_whitelisted_registers(engine,
+ client[0].scratch[0],
+ client[1].scratch[0],
+ result_eq);
+ if (err)
+ goto err;
+
+ /* Read back the updated values in ctx0 */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[1]);
+ if (err)
+ goto err;
+
+		/* User should be granted privilege to overwrite regs */
+ err = check_whitelisted_registers(engine,
+ client[0].scratch[0],
+ client[0].scratch[1],
+ result_neq);
+ if (err)
+ goto err;
+ }
+
+err:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i].ctx)
+ break;
+
+ i915_vma_unpin_and_release(&client[i].scratch[1], 0);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(client[i].ctx);
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ return err;
+}
+
+static bool
+verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
+ const char *str)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
bool ok = true;
ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
- for_each_engine(engine, i915, id)
- ok &= wa_list_verify(engine->uncore,
- &lists->engine[id].wa_list, str);
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ enum intel_engine_id id = ce->engine->id;
+
+ ok &= engine_wa_list_verify(ce,
+ &lists->engine[id].wa_list,
+ str) == 0;
+
+ ok &= engine_wa_list_verify(ce,
+ &lists->engine[id].ctx_wa_list,
+ str) == 0;
+ }
+ i915_gem_context_unlock_engines(ctx);
return ok;
}
static int
-live_gpu_reset_gt_engine_workarounds(void *arg)
+live_gpu_reset_workarounds(void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
@@ -770,6 +1049,10 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
if (!intel_has_gpu_reset(i915))
return 0;
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(i915);
@@ -777,15 +1060,16 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
reference_lists_init(i915, &lists);
- ok = verify_gt_engine_wa(i915, &lists, "before reset");
+ ok = verify_wa_lists(ctx, &lists, "before reset");
if (!ok)
goto out;
i915_reset(i915, ALL_ENGINES, "live_workarounds");
- ok = verify_gt_engine_wa(i915, &lists, "after reset");
+ ok = verify_wa_lists(ctx, &lists, "after reset");
out:
+ kernel_context_close(ctx);
reference_lists_fini(i915, &lists);
intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
@@ -794,7 +1078,7 @@ out:
}
static int
-live_engine_reset_gt_engine_workarounds(void *arg)
+live_engine_reset_workarounds(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
@@ -823,7 +1107,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
pr_info("Verifying after %s reset...\n", engine->name);
- ok = verify_gt_engine_wa(i915, &lists, "before reset");
+ ok = verify_wa_lists(ctx, &lists, "before reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -831,7 +1115,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
i915_reset_engine(engine, "live_workarounds");
- ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
+ ok = verify_wa_lists(ctx, &lists, "after idle reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -862,7 +1146,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
- ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
+ ok = verify_wa_lists(ctx, &lists, "after busy reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -885,8 +1169,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(live_dirty_whitelist),
SUBTEST(live_reset_whitelist),
- SUBTEST(live_gpu_reset_gt_engine_workarounds),
- SUBTEST(live_engine_reset_gt_engine_workarounds),
+ SUBTEST(live_isolated_whitelist),
+ SUBTEST(live_gpu_reset_workarounds),
+ SUBTEST(live_engine_reset_workarounds),
};
int err;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index f5a328b5290a..b54f2bdc13a4 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -149,9 +149,9 @@ struct intel_vgpu_submission_ops {
struct intel_vgpu_submission {
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
+ struct intel_context *shadow[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
- struct i915_gem_context *shadow_ctx;
union {
u64 i915_context_pml4;
u64 i915_context_pdps[GEN8_3LVL_PDPES];
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index a68addf95c23..144301b778df 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1576,7 +1576,7 @@ hw_id_show(struct device *dev, struct device_attribute *attr,
struct intel_vgpu *vgpu = (struct intel_vgpu *)
mdev_get_drvdata(mdev);
return sprintf(buf, "%u\n",
- vgpu->submission.shadow_ctx->hw_id);
+ vgpu->submission.shadow[0]->gem_context->hw_id);
}
return sprintf(buf, "\n");
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 90bb3df0db50..96e1edf21b3f 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -493,8 +493,7 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
- dev_priv->engine[ring_id])))
+ !is_inhibit_context(s->shadow[ring_id]))
continue;
if (mmio->mask)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0f919f0a43d4..2f1c12d877cb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -36,6 +36,7 @@
#include <linux/kthread.h>
#include "i915_drv.h"
+#include "i915_gem_pm.h"
#include "gvt.h"
#define RING_CTX_OFF(x) \
@@ -277,18 +278,23 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void shadow_context_descriptor_update(struct intel_context *ce)
+static void
+shadow_context_descriptor_update(struct intel_context *ce,
+ struct intel_vgpu_workload *workload)
{
- u64 desc = 0;
-
- desc = ce->lrc_desc;
+ u64 desc = ce->lrc_desc;
- /* Update bits 0-11 of the context descriptor which includes flags
+ /*
+	 * Update bits 0-11 of the context descriptor, which include flags
* like GEN8_CTX_* cached in desc_template
*/
desc &= U64_MAX << 12;
desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
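+
+	/*
+	 * Propagate the guest's addressing mode from the guest context
+	 * descriptor into the shadow descriptor.
+	 */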
+ desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+ desc |= workload->ctx_desc.addressing_mode <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
ce->lrc_desc = desc;
}
@@ -382,26 +388,22 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
struct i915_request *rq;
- int ret = 0;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (workload->req)
- goto out;
+ return 0;
- rq = i915_request_alloc(engine, shadow_ctx);
+ rq = i915_request_create(s->shadow[workload->ring_id]);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto out;
+ return PTR_ERR(rq);
}
+
workload->req = i915_request_get(rq);
-out:
- return ret;
+ return 0;
}
/**
@@ -416,10 +418,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
- struct intel_context *ce;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -427,29 +426,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->shadow)
return 0;
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ce = intel_context_pin(shadow_ctx, engine);
- if (IS_ERR(ce)) {
- gvt_vgpu_err("fail to pin shadow context\n");
- return PTR_ERR(ce);
- }
-
- shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
- shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
- GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(ce);
+ shadow_context_descriptor_update(s->shadow[workload->ring_id],
+ workload);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
- goto err_unpin;
+ return ret;
if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
@@ -461,8 +444,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
return 0;
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_unpin:
- intel_context_unpin(ce);
return ret;
}
@@ -689,7 +670,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -700,7 +680,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+ ret = set_context_ppgtt_from_shadow(workload,
+ s->shadow[ring_id]->gem_context);
if (ret < 0) {
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
goto err_req;
@@ -949,11 +930,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
- /* unpin shadow ctx as the shadow_ctx update is done */
- mutex_lock(&rq->i915->drm.struct_mutex);
- intel_context_unpin(rq->hw_context);
- mutex_unlock(&rq->i915->drm.struct_mutex);
-
i915_request_put(fetch_and_zero(&workload->req));
}
@@ -1032,8 +1008,6 @@ static int workload_thread(void *priv)
workload->ring_id, workload,
workload->vgpu->id);
- intel_runtime_pm_get(gvt->dev_priv);
-
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
@@ -1063,7 +1037,6 @@ complete:
intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
- intel_runtime_pm_put_unchecked(gvt->dev_priv);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1146,17 +1119,17 @@ err:
}
static void
-i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
+i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
+ struct i915_hw_ppgtt *ppgtt)
{
- struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
- px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ px_dma(&ppgtt->pml4) = s->i915_context_pml4;
} else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
- px_dma(i915_ppgtt->pdp.page_directory[i]) =
- s->i915_context_pdps[i];
+ px_dma(ppgtt->pdp.page_directory[i]) =
+ s->i915_context_pdps[i];
}
}
@@ -1170,10 +1143,15 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
- i915_context_ppgtt_root_restore(s);
- i915_gem_context_put(s->shadow_ctx);
+
+ i915_context_ppgtt_root_restore(s, s->shadow[0]->gem_context->ppgtt);
+ for_each_engine(engine, vgpu->gvt->dev_priv, id)
+ intel_context_unpin(s->shadow[id]);
+
kmem_cache_destroy(s->workloads);
}
@@ -1199,17 +1177,17 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
}
static void
-i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
+i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
+ struct i915_hw_ppgtt *ppgtt)
{
- struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_4lvl(&i915_ppgtt->vm))
- s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
- else {
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ s->i915_context_pml4 = px_dma(&ppgtt->pml4);
+ } else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
s->i915_context_pdps[i] =
- px_dma(i915_ppgtt->pdp.page_directory[i]);
+ px_dma(ppgtt->pdp.page_directory[i]);
}
}
@@ -1226,16 +1204,36 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- enum intel_engine_id i;
struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id i;
int ret;
- s->shadow_ctx = i915_gem_context_create_gvt(
- &vgpu->gvt->dev_priv->drm);
- if (IS_ERR(s->shadow_ctx))
- return PTR_ERR(s->shadow_ctx);
+ ctx = i915_gem_context_create_gvt(&vgpu->gvt->dev_priv->drm);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ i915_context_ppgtt_root_save(s, ctx->ppgtt);
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ struct intel_context *ce;
+
+ INIT_LIST_HEAD(&s->workload_q_head[i]);
+ s->shadow[i] = ERR_PTR(-EINVAL);
+
+ ce = i915_gem_context_get_engine(ctx, i);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ goto out_shadow_ctx;
+ }
- i915_context_ppgtt_root_save(s);
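+		/*
+		 * Pinning takes its own reference on the intel_context, so
+		 * the lookup reference from get_engine() can be dropped now.
+		 */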
+ ret = intel_context_pin(ce);
+ intel_context_put(ce);
+ if (ret)
+ goto out_shadow_ctx;
+
+ s->shadow[i] = ce;
+ }
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
@@ -1251,16 +1249,21 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
goto out_shadow_ctx;
}
- for_each_engine(engine, vgpu->gvt->dev_priv, i)
- INIT_LIST_HEAD(&s->workload_q_head[i]);
-
atomic_set(&s->running_workload_num, 0);
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
+ i915_gem_context_put(ctx);
return 0;
out_shadow_ctx:
- i915_gem_context_put(s->shadow_ctx);
+ i915_context_ppgtt_root_restore(s, ctx->ppgtt);
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ if (IS_ERR(s->shadow[i]))
+ break;
+
+ intel_context_unpin(s->shadow[i]);
+ }
+ i915_gem_context_put(ctx);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 503d548a55f7..e9fadcb4d592 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -25,8 +25,9 @@
*
*/
+#include "gt/intel_engine.h"
+
#include "i915_drv.h"
-#include "intel_ringbuffer.h"
/**
* DOC: batch buffer command parser
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5823ffb17821..633a08c0f907 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,7 +32,12 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
-#include "i915_reset.h"
+#include "gt/intel_reset.h"
+
+#include "i915_debugfs.h"
+#include "i915_gem_context.h"
+#include "i915_irq.h"
+#include "intel_csr.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_fbc.h"
@@ -41,6 +46,7 @@
#include "intel_hdmi.h"
#include "intel_pm.h"
#include "intel_psr.h"
+#include "intel_sideband.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -206,6 +212,18 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
vma->ggtt_view.rotated.plane[1].offset);
break;
+ case I915_GGTT_VIEW_REMAPPED:
+ seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
+ vma->ggtt_view.remapped.plane[0].width,
+ vma->ggtt_view.remapped.plane[0].height,
+ vma->ggtt_view.remapped.plane[0].stride,
+ vma->ggtt_view.remapped.plane[0].offset,
+ vma->ggtt_view.remapped.plane[1].width,
+ vma->ggtt_view.remapped.plane[1].height,
+ vma->ggtt_view.remapped.plane[1].stride,
+ vma->ggtt_view.remapped.plane[1].offset);
+ break;
+
default:
MISSING_CASE(vma->ggtt_view.type);
break;
@@ -395,14 +413,17 @@ static void print_context_stats(struct seq_file *m,
struct i915_gem_context *ctx;
list_for_each_entry(ctx, &i915->contexts.list, link) {
+ struct i915_gem_engines_iter it;
struct intel_context *ce;
- list_for_each_entry(ce, &ctx->active_engines, active_link) {
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx), it) {
if (ce->state)
per_file_stats(0, ce->state->obj, &kstats);
if (ce->ring)
per_file_stats(0, ce->ring->vma->obj, &kstats);
}
+ i915_gem_context_unlock_engines(ctx);
if (!IS_ERR_OR_NULL(ctx->file_priv)) {
struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
@@ -1045,8 +1066,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 rpmodectl, freq_sts;
- mutex_lock(&dev_priv->pcu_lock);
-
rpmodectl = I915_READ(GEN6_RP_CONTROL);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
@@ -1056,7 +1075,10 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
+ vlv_punit_get(dev_priv);
freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(dev_priv);
+
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
@@ -1078,7 +1100,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, rps->efficient_freq));
- mutex_unlock(&dev_priv->pcu_lock);
} else if (INTEL_GEN(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
@@ -1279,7 +1300,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
- u32 seqno[I915_NUM_ENGINES];
struct intel_instdone instdone;
intel_wakeref_t wakeref;
enum intel_engine_id id;
@@ -1296,10 +1316,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
}
with_intel_runtime_pm(dev_priv, wakeref) {
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id)
acthd[id] = intel_engine_get_active_head(engine);
- seqno[id] = intel_engine_get_hangcheck_seqno(engine);
- }
intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
}
@@ -1316,11 +1334,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s:\n", engine->name);
- seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
- engine->hangcheck.last_seqno,
- seqno[id],
- engine->hangcheck.next_seqno,
+ seq_printf(m, "%s: %d ms ago\n",
+ engine->name,
jiffies_to_msecs(jiffies -
engine->hangcheck.action_timestamp));
@@ -1483,12 +1498,9 @@ static int gen6_drpc_info(struct seq_file *m)
gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
}
- if (INTEL_GEN(dev_priv) <= 7) {
- mutex_lock(&dev_priv->pcu_lock);
+ if (INTEL_GEN(dev_priv) <= 7)
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
&rc6vids);
- mutex_unlock(&dev_priv->pcu_lock);
- }
seq_printf(m, "RC1e Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -1752,17 +1764,10 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
unsigned int max_gpu_freq, min_gpu_freq;
intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
- int ret;
if (!HAS_LLC(dev_priv))
return -ENODEV;
- wakeref = intel_runtime_pm_get(dev_priv);
-
- ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
- if (ret)
- goto out;
-
min_gpu_freq = rps->min_freq;
max_gpu_freq = rps->max_freq;
if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
@@ -1773,6 +1778,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+ wakeref = intel_runtime_pm_get(dev_priv);
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv,
@@ -1786,12 +1792,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
-
- mutex_unlock(&dev_priv->pcu_lock);
-
-out:
intel_runtime_pm_put(dev_priv, wakeref);
- return ret;
+
+ return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
@@ -1892,6 +1895,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+ struct i915_gem_engines_iter it;
struct intel_context *ce;
seq_puts(m, "HW context ");
@@ -1916,7 +1920,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
- list_for_each_entry(ce, &ctx->active_engines, active_link) {
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx), it) {
seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
describe_obj(m, ce->state->obj);
@@ -1924,6 +1929,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n');
}
+ i915_gem_context_unlock_engines(ctx);
seq_putc(m, '\n');
}
@@ -2028,11 +2034,11 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
act_freq = vlv_punit_read(dev_priv,
PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(dev_priv);
act_freq = (act_freq >> 8) & 0xff;
- mutex_unlock(&dev_priv->pcu_lock);
} else {
act_freq = intel_get_cagf(dev_priv,
I915_READ(GEN6_RPSTAT1));
@@ -2040,8 +2046,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
}
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
- seq_printf(m, "GPU busy? %s [%d requests]\n",
- yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
+ seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -2060,9 +2065,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
- if (INTEL_GEN(dev_priv) >= 6 &&
- rps->enabled &&
- dev_priv->gt.active_requests) {
+ if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -3091,9 +3094,9 @@ static int i915_engine_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(dev_priv);
- seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
- seq_printf(m, "Global active requests: %d\n",
- dev_priv->gt.active_requests);
+ seq_printf(m, "GT awake? %s [%d]\n",
+ yesno(dev_priv->gt.awake),
+ atomic_read(&dev_priv->gt.wakeref.count));
seq_printf(m, "CS timestamp frequency: %u kHz\n",
RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
@@ -3904,14 +3907,26 @@ i915_drop_caches_set(void *data, u64 val)
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
- if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
+ if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
- if (val & DROP_ACTIVE)
+ /*
+ * To finish the flush of the idle_worker, we must complete
+ * the switch-to-kernel-context, which requires a double
+ * pass through wait_for_idle: first queues the switch,
+ * second waits for the switch.
+ */
+ if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+
+ if (ret == 0 && val & DROP_IDLE)
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
@@ -3938,11 +3953,8 @@ i915_drop_caches_set(void *data, u64 val)
fs_reclaim_release(GFP_KERNEL);
if (val & DROP_IDLE) {
- do {
- if (READ_ONCE(i915->gt.active_requests))
- flush_delayed_work(&i915->gt.retire_work);
- drain_delayed_work(&i915->gt.idle_work);
- } while (READ_ONCE(i915->gt.awake));
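+		/*
+		 * After the wait_for_idle above the GT should be parked,
+		 * so a single flush of each worker suffices.
+		 */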
+ flush_delayed_work(&i915->gem.retire_work);
+ flush_work(&i915->gem.idle_work);
}
if (val & DROP_FREED)
@@ -4757,6 +4769,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_connector *intel_connector = to_intel_connector(connector);
+ bool hdcp_cap, hdcp2_cap;
if (connector->status != connector_status_connected)
return -ENODEV;
@@ -4767,8 +4780,16 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
seq_printf(m, "%s:%d HDCP version: ", connector->name,
connector->base.id);
- seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
- "None" : "HDCP1.4");
+ hdcp_cap = intel_hdcp_capable(intel_connector);
+ hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
seq_puts(m, "\n");
return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h
new file mode 100644
index 000000000000..c0cd22eb916d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_DEBUGFS_H__
+#define __I915_DEBUGFS_H__
+
+struct drm_i915_private;
+struct drm_connector;
+
+#ifdef CONFIG_DEBUG_FS
+int i915_debugfs_register(struct drm_i915_private *dev_priv);
+int i915_debugfs_connector_add(struct drm_connector *connector);
+#else
+static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
+static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; }
+#endif
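+
+/*
+ * The inline stubs let callers invoke these unconditionally, without
+ * CONFIG_DEBUG_FS guards at each call site.
+ */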
+
+#endif /* __I915_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1ad88e6d7c04..83d2eb9e74cb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -47,22 +47,31 @@
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_reset.h"
+#include "gt/intel_workarounds.h"
+
+#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_irq.h"
#include "i915_pmu.h"
#include "i915_query.h"
-#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_acpi.h"
#include "intel_audio.h"
#include "intel_cdclk.h"
#include "intel_csr.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_fbdev.h"
+#include "intel_gmbus.h"
+#include "intel_hotplug.h"
+#include "intel_overlay.h"
+#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_sprite.h"
#include "intel_uc.h"
-#include "intel_workarounds.h"
static struct drm_driver driver;
@@ -186,7 +195,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
- return PCH_KBP;
+ /* KBP is SPT compatible */
+ return PCH_SPT;
case INTEL_PCH_CNP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
@@ -433,6 +443,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_CAPTURE:
case I915_PARAM_HAS_EXEC_BATCH_FIRST:
case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
+ case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
/* For the time being all of these are always true;
* if some supported hardware does not have one of these
* features this value needs to be provided from
@@ -697,7 +708,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_csr;
- intel_setup_gmbus(dev_priv);
+ intel_gmbus_setup(dev_priv);
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
@@ -732,7 +743,7 @@ cleanup_modeset:
intel_modeset_cleanup(dev);
cleanup_irq:
drm_irq_uninstall(dev);
- intel_teardown_gmbus(dev_priv);
+ intel_gmbus_teardown(dev_priv);
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
intel_power_domains_fini_hw(dev_priv);
@@ -884,6 +895,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->backlight_lock);
mutex_init(&dev_priv->sb_lock);
+ pm_qos_add_request(&dev_priv->sb_qos,
+ PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
@@ -943,6 +957,9 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
i915_gem_cleanup_early(dev_priv);
i915_workqueues_cleanup(dev_priv);
i915_engines_cleanup(dev_priv);
+
+ pm_qos_remove_request(&dev_priv->sb_qos);
+ mutex_destroy(&dev_priv->sb_lock);
}
/**
@@ -1760,7 +1777,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_pmu_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
- drm_dev_unregister(&dev_priv->drm);
+ drm_dev_unplug(&dev_priv->drm);
i915_gem_shrinker_unregister(dev_priv);
}
@@ -2322,7 +2339,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_power_domains_resume(dev_priv);
- intel_engines_sanitize(dev_priv, true);
+ intel_gt_sanitize(dev_priv, true);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2875,7 +2892,7 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_uc_suspend(dev_priv);
+ intel_uc_runtime_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
@@ -3098,7 +3115,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -3111,13 +3128,13 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
@@ -3136,7 +3153,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
- DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
@@ -3148,6 +3165,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 066fd2a12851..a2664ea1395b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -62,18 +62,21 @@
#include "i915_reg.h"
#include "i915_utils.h"
+#include "gt/intel_lrc.h"
+#include "gt/intel_engine.h"
+#include "gt/intel_workarounds.h"
+
#include "intel_bios.h"
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
#include "intel_frontbuffer.h"
-#include "intel_lrc.h"
#include "intel_opregion.h"
-#include "intel_ringbuffer.h"
+#include "intel_runtime_pm.h"
#include "intel_uc.h"
#include "intel_uncore.h"
+#include "intel_wakeref.h"
#include "intel_wopcm.h"
-#include "intel_workarounds.h"
#include "i915_gem.h"
#include "i915_gem_context.h"
@@ -93,8 +96,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190417"
-#define DRIVER_TIMESTAMP 1555492067
+#define DRIVER_DATE "20190524"
+#define DRIVER_TIMESTAMP 1558719322
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -133,8 +136,6 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
-typedef depot_stack_handle_t intel_wakeref_t;
-
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -344,10 +345,6 @@ struct drm_i915_display_funcs {
void (*load_luts)(const struct intel_crtc_state *crtc_state);
};
-#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
-#define CSR_VERSION_MAJOR(version) ((version) >> 16)
-#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
-
struct intel_csr {
struct work_struct work;
const char *fw_path;
@@ -535,17 +532,11 @@ enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
- PCH_SPT, /* Sunrisepoint PCH */
- PCH_KBP, /* Kaby Lake PCH */
+ PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
};
-enum intel_sbi_destination {
- SBI_ICLK,
- SBI_MPHY,
-};
-
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
@@ -648,6 +639,8 @@ struct intel_rps_ei {
};
struct intel_rps {
+ struct mutex lock; /* protects enabling and the worker */
+
/*
* work, interrupts_enabled and pm_iir are protected by
* dev_priv->irq_lock
@@ -841,6 +834,11 @@ struct i915_power_domains {
struct mutex lock;
int domain_use_count[POWER_DOMAIN_NUM];
+
+ struct delayed_work async_put_work;
+ intel_wakeref_t async_put_wakeref;
+ u64 async_put_domains[2];
+
struct i915_power_well *power_wells;
};
@@ -1561,6 +1559,7 @@ struct drm_i915_private {
/* Sideband mailbox protection */
struct mutex sb_lock;
+ struct pm_qos_request sb_qos;
/** Cached value of IMR to avoid reads in updating the bitfield */
union {
@@ -1709,14 +1708,6 @@ struct drm_i915_private {
*/
u32 edram_size_mb;
- /*
- * Protects RPS/RC6 register access and PCU communication.
- * Must be taken after struct_mutex if nested. Note that
- * this lock may be held for long periods of time when
- * talking to hw - so only take it when talking to hw!
- */
- struct mutex pcu_lock;
-
/* gen6+ GT PM state */
struct intel_gen6_power_mgmt gt_pm;
@@ -1995,8 +1986,6 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
- void (*cleanup_engine)(struct intel_engine_cs *engine);
-
struct i915_gt_timelines {
struct mutex mutex; /* protects list, tainted by GPU */
struct list_head active_list;
@@ -2006,10 +1995,10 @@ struct drm_i915_private {
struct list_head hwsp_free_list;
} timelines;
- intel_engine_mask_t active_engines;
struct list_head active_rings;
struct list_head closed_vma;
- u32 active_requests;
+
+ struct intel_wakeref wakeref;
/**
* Is the GPU currently considered idle, or busy executing
@@ -2020,6 +2009,16 @@ struct drm_i915_private {
*/
intel_wakeref_t awake;
+ struct blocking_notifier_head pm_notifications;
+
+ ktime_t last_init_time;
+
+ struct i915_vma *scratch;
+ } gt;
+
+ struct {
+ struct notifier_block pm_notifier;
+
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
@@ -2036,12 +2035,8 @@ struct drm_i915_private {
* arrive within a small period of time, we fire
* off the idle_work.
*/
- struct delayed_work idle_work;
-
- ktime_t last_init_time;
-
- struct i915_vma *scratch;
- } gt;
+ struct work_struct idle_work;
+ } gem;
/* For i945gm vblank irq vs. C3 workaround */
struct {
@@ -2585,6 +2580,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
+#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
+
#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
@@ -2636,7 +2633,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev_priv) \
@@ -2714,23 +2710,8 @@ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
-int intel_engines_init(struct drm_i915_private *dev_priv);
-
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
-/* intel_hotplug.c */
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
- u32 pin_mask, u32 long_mask);
-void intel_hpd_init(struct drm_i915_private *dev_priv);
-void intel_hpd_init_work(struct drm_i915_private *dev_priv);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
- enum port port);
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-
-/* i915_irq.c */
static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
unsigned long delay;
@@ -2748,11 +2729,6 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
&dev_priv->gpu_error.hangcheck_work, delay);
}
-extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_irq_fini(struct drm_i915_private *dev_priv);
-int intel_irq_install(struct drm_i915_private *dev_priv);
-void intel_irq_uninstall(struct drm_i915_private *dev_priv);
-
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt;
@@ -2763,62 +2739,6 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.active;
}
-u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-void
-i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 status_mask);
-
-void
-i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 status_mask);
-
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
-void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
- u32 mask,
- u32 bits);
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void
-ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
-{
- ilk_update_display_irq(dev_priv, bits, bits);
-}
-static inline void
-ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
-{
- ilk_update_display_irq(dev_priv, bits, 0);
-}
-void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 bits)
-{
- bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
-}
-static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 bits)
-{
- bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
-}
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void
-ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
-{
- ibx_display_interrupt_update(dev_priv, bits, bits);
-}
-static inline void
-ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
-{
- ibx_display_interrupt_update(dev_priv, bits, 0);
-}
-
/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -2903,15 +2823,15 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
* grace period so that we catch work queued via RCU from the first
* pass. As neither drain_workqueue() nor flush_workqueue() report
 * a result, we make the assumption that we don't require more
- * than 2 passes to catch all recursive RCU delayed work.
+ * than 3 passes to catch all _recursive_ RCU delayed work.
*
*/
- int pass = 2;
+ int pass = 3;
do {
rcu_barrier();
i915_gem_drain_freed_objects(i915);
- drain_workqueue(i915->wq);
} while (--pass);
+ drain_workqueue(i915->wq);
}
struct i915_vma * __must_check
@@ -2944,6 +2864,10 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
unsigned int n);
dma_addr_t
+i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned int *len);
+dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
unsigned long n);
@@ -3005,7 +2929,7 @@ enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass);
-void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
+void __i915_gem_object_truncate(struct drm_i915_gem_object *obj);
enum i915_map_type {
I915_MAP_WB = 0,
@@ -3124,7 +3048,6 @@ int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_fini(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags, long timeout);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
@@ -3266,11 +3189,12 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
-#define I915_SHRINK_ACTIVE 0x8
-#define I915_SHRINK_VMAPS 0x10
+#define I915_SHRINK_PURGEABLE BIT(0)
+#define I915_SHRINK_UNBOUND BIT(1)
+#define I915_SHRINK_BOUND BIT(2)
+#define I915_SHRINK_ACTIVE BIT(3)
+#define I915_SHRINK_VMAPS BIT(4)
+#define I915_SHRINK_WRITEBACK BIT(5)
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
@@ -3291,18 +3215,6 @@ u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
unsigned int tiling, unsigned int stride);
-/* i915_debugfs.c */
-#ifdef CONFIG_DEBUG_FS
-int i915_debugfs_register(struct drm_i915_private *dev_priv);
-int i915_debugfs_connector_add(struct drm_connector *connector);
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
-#else
-static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
-static inline int i915_debugfs_connector_add(struct drm_connector *connector)
-{ return 0; }
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
-#endif
-
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
@@ -3330,56 +3242,6 @@ extern int i915_restore_state(struct drm_i915_private *dev_priv);
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
-/* intel_lpe_audio.c */
-int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
- const void *eld, int ls_clock, bool dp_output);
-
-/* intel_i2c.c */
-extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
-extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
-extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
- unsigned int pin);
-extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
-
-extern struct i2c_adapter *
-intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
-extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
-extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
-static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
-{
- return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
-}
-extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
-
-/* intel_bios.c */
-void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_cleanup(struct drm_i915_private *dev_priv);
-bool intel_bios_is_valid_vbt(const void *buf, size_t size);
-bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
-bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
-bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
-bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
- enum port port);
-bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
- enum port port);
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
-
-/* intel_acpi.c */
-#ifdef CONFIG_ACPI
-extern void intel_register_dsm_handler(void);
-extern void intel_unregister_dsm_handler(void);
-#else
-static inline void intel_register_dsm_handler(void) { return; }
-static inline void intel_unregister_dsm_handler(void) { return; }
-#endif /* CONFIG_ACPI */
-
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
@@ -3387,20 +3249,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)INTEL_INFO(dev_priv);
}
-static inline struct intel_sseu
-intel_device_default_sseu(struct drm_i915_private *i915)
-{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
- struct intel_sseu value = {
- .slice_mask = sseu->slice_mask,
- .subslice_mask = sseu->subslice_mask[0],
- .min_eus_per_subslice = sseu->max_eus_per_subslice,
- .max_eus_per_subslice = sseu->max_eus_per_subslice,
- };
-
- return value;
-}
-
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
@@ -3417,115 +3265,15 @@ extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
-void intel_dsc_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-/* overlay */
-extern struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
-extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
- struct intel_overlay_error_state *error);
-
extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
- u32 val, int fast_timeout_us,
- int slow_timeout_ms);
-#define sandybridge_pcode_write(dev_priv, mbox, val) \
- sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
-
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms);
-
-/* intel_sideband.c */
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
-int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
-u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
-void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
-u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
-u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
- enum intel_sbi_destination destination);
-void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
- enum intel_sbi_destination destination);
-u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-
-/* intel_dpio_phy.c */
-void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
- enum dpio_phy *phy, enum dpio_channel *ch);
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
- enum port port, u32 margin, u32 scale,
- u32 enable, u32 deemphasis);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- u8 lane_lat_optim_mask);
-u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
-
-void chv_set_phy_signal_level(struct intel_encoder *encoder,
- u32 deemph_reg_value, u32 margin_reg_value,
- bool uniq_trans_scale);
-void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- bool reset);
-void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void chv_phy_release_cl2_override(struct intel_encoder *encoder);
-void chv_phy_post_pll_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state);
-
-void vlv_set_phy_signal_level(struct intel_encoder *encoder,
- u32 demph_reg_value, u32 preemph_reg_value,
- u32 uniqtranscale_reg_value, u32 tx3_demph);
-void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void vlv_phy_reset_lanes(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state);
-
-/* intel_combo_phy.c */
-void icl_combo_phys_init(struct drm_i915_private *dev_priv);
-void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
-void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
-void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);
-
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
- const i915_reg_t reg);
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
-
-static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
- const i915_reg_t reg)
-{
- return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
-}
-
#define __I915_REG_OP(op__, dev_priv__, ...) \
intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
@@ -3599,60 +3347,6 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
-static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
-{
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return VLV_VGACNTRL;
- else if (INTEL_GEN(dev_priv) >= 5)
- return CPU_VGACNTRL;
- else
- return VGACNTRL;
-}
-
-static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
-{
- unsigned long j = msecs_to_jiffies(m);
-
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
-static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
-{
- /* nsecs_to_jiffies64() does not guard against overflow */
- if (NSEC_PER_SEC % HZ &&
- div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
- return MAX_JIFFY_OFFSET;
-
- return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
-}
-
-/*
- * If you need to wait X milliseconds between events A and B, but event B
- * doesn't happen exactly after event A, you record the timestamp (jiffies) of
- * when event A happened, then just before event B you call this function and
- * pass the timestamp as the first argument, and X as the second argument.
- */
-static inline void
-wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
-{
- unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
-
- /*
- * Don't re-read the value of "jiffies" every time since it may change
- * behind our back and break the math.
- */
- tmp_jiffies = jiffies;
- target_jiffies = timestamp_jiffies +
- msecs_to_jiffies_timeout(to_wait_ms);
-
- if (time_after(target_jiffies, tmp_jiffies)) {
- remaining_jiffies = target_jiffies - tmp_jiffies;
- while (remaining_jiffies)
- remaining_jiffies =
- schedule_timeout_uninterruptible(remaining_jiffies);
- }
-}
-
void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
@@ -3690,4 +3384,15 @@ static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
return i915_ggtt_offset(i915->gt.scratch);
}
+static inline void add_taint_for_CI(unsigned int taint)
+{
+ /*
+ * The system is "ok", just about surviving for the user, but
+ * CI results are now unreliable as the HW is very suspect.
+ * CI checks the taint state after every test and will reboot
+ * the machine if the kernel is tainted.
+ */
+ add_taint(taint, LOCKDEP_STILL_OK);
+}
+
#endif
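The new add_taint_for_CI() helper is only useful if the CI harness actually polls the taint state between tests. A minimal userspace sketch of such a check, assuming the standard /proc/sys/kernel/tainted interface (the check itself is real; the reboot policy built on top of it is not part of this patch):

    #include <stdio.h>

    /* Nonzero if the running kernel has any taint bit set. */
    static int kernel_is_tainted(void)
    {
        unsigned long taint = 0;
        FILE *f = fopen("/proc/sys/kernel/tainted", "r");

        if (!f)
            return 0;
        if (fscanf(f, "%lu", &taint) != 1)
            taint = 0;
        fclose(f);
        return taint != 0;
    }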
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
index 591dd89ba7af..6621595fe74c 100644
--- a/drivers/gpu/drm/i915/i915_fixed.h
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -71,7 +71,7 @@ static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
u64 tmp;
- tmp = (u64)val * mul.val;
+ tmp = mul_u32_u32(val, mul.val);
tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
WARN_ON(tmp > U32_MAX);
@@ -83,7 +83,7 @@ static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
{
u64 tmp;
- tmp = (u64)val.val * mul.val;
+ tmp = mul_u32_u32(val.val, mul.val);
tmp = tmp >> 16;
return clamp_u64_to_fixed16(tmp);
@@ -114,7 +114,7 @@ static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul
{
u64 tmp;
- tmp = (u64)val * mul.val;
+ tmp = mul_u32_u32(val, mul.val);
return clamp_u64_to_fixed16(tmp);
}
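The three i915_fixed.h hunks replace an open-coded (u64)val * mul.val with mul_u32_u32(), which lets 32-bit builds emit a single widening 32x32->64 multiply rather than a full 64x64 one. A sketch of the generic fallback, mirroring the shape of the helper in include/linux/math64.h (the _sketch names are mine):

    /* Generic fallback; architectures may provide a faster override. */
    static inline u64 mul_u32_u32_sketch(u32 a, u32 b)
    {
        return (u64)a * b;
    }

    /* 16.16 fixed-point multiply built on it, as in the hunks above. */
    static inline u32 fixed16_mul_sketch(u32 a, u32 b)
    {
        return (u32)(mul_u32_u32_sketch(a, b) >> 16);
    }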
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad01c92aaf74..d3b7dac527dc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -39,19 +39,23 @@
#include <linux/dma-buf.h>
#include <linux/mman.h>
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_mocs.h"
+#include "gt/intel_reset.h"
+#include "gt/intel_workarounds.h"
+
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gemfs.h"
-#include "i915_globals.h"
-#include "i915_reset.h"
+#include "i915_gem_pm.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_display.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
-#include "intel_mocs.h"
#include "intel_pm.h"
-#include "intel_workarounds.h"
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
@@ -102,105 +106,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->mm.object_stat_lock);
}
-static void __i915_gem_park(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref;
-
- GEM_TRACE("\n");
-
- lockdep_assert_held(&i915->drm.struct_mutex);
- GEM_BUG_ON(i915->gt.active_requests);
- GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
-
- if (!i915->gt.awake)
- return;
-
- /*
- * Be paranoid and flush a concurrent interrupt to make sure
- * we don't reactivate any irq tasklets after parking.
- *
- * FIXME: Note that even though we have waited for execlists to be idle,
- * there may still be an in-flight interrupt even though the CSB
- * is now empty. synchronize_irq() makes sure that a residual interrupt
- * is completed before we continue, but it doesn't prevent the HW from
- * raising a spurious interrupt later. To complete the shield we should
- * coordinate disabling the CS irq with flushing the interrupts.
- */
- synchronize_irq(i915->drm.irq);
-
- intel_engines_park(i915);
- i915_timelines_park(i915);
-
- i915_pmu_gt_parked(i915);
- i915_vma_parked(i915);
-
- wakeref = fetch_and_zero(&i915->gt.awake);
- GEM_BUG_ON(!wakeref);
-
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_idle(i915);
-
- intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
-
- i915_globals_park();
-}
-
-void i915_gem_park(struct drm_i915_private *i915)
-{
- GEM_TRACE("\n");
-
- lockdep_assert_held(&i915->drm.struct_mutex);
- GEM_BUG_ON(i915->gt.active_requests);
-
- if (!i915->gt.awake)
- return;
-
- /* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
- mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
-}
-
-void i915_gem_unpark(struct drm_i915_private *i915)
-{
- GEM_TRACE("\n");
-
- lockdep_assert_held(&i915->drm.struct_mutex);
- GEM_BUG_ON(!i915->gt.active_requests);
- assert_rpm_wakelock_held(i915);
-
- if (i915->gt.awake)
- return;
-
- /*
- * It seems that the DMC likes to transition between the DC states a lot
- * when there are no connected displays (no active power domains) during
- * command submission.
- *
- * This activity has negative impact on the performance of the chip with
- * huge latencies observed in the interrupt handler and elsewhere.
- *
- * Work around it by grabbing a GT IRQ power domain whilst there is any
- * GT activity, preventing any DC state transitions.
- */
- i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
- GEM_BUG_ON(!i915->gt.awake);
-
- i915_globals_unpark();
-
- intel_enable_gt_powersave(i915);
- i915_update_gfx_val(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_busy(i915);
- i915_pmu_gt_unparked(i915);
-
- intel_engines_unpark(i915);
-
- i915_queue_hangcheck(i915);
-
- queue_delayed_work(i915->wq,
- &i915->gt.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -656,8 +561,31 @@ i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+ u32 format;
+
+ switch (cpp) {
+ case 1:
+ format = DRM_FORMAT_C8;
+ break;
+ case 2:
+ format = DRM_FORMAT_RGB565;
+ break;
+ case 4:
+ format = DRM_FORMAT_XRGB8888;
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* have to work out size/pitch and return them */
- args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
+ args->pitch = ALIGN(args->width * cpp, 64);
+
+ /* align stride to page size so that we can remap */
+ if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
+ DRM_FORMAT_MOD_LINEAR))
+ args->pitch = ALIGN(args->pitch, 4096);
+
args->size = args->pitch * args->height;
return i915_gem_create(file, to_i915(dev),
&args->size, &args->handle);
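The pitch/size math the reworked i915_gem_dumb_create() performs, pulled out as a standalone sketch; the max_linear_stride parameter stands in for the intel_plane_fb_max_stride() query and the function name is mine:

    /* Row stride for a dumb buffer: cpp from bpp, 64-byte aligned. */
    static u32 dumb_pitch(u32 width, u32 bpp, u32 max_linear_stride)
    {
        u32 cpp = DIV_ROUND_UP(bpp, 8);
        u32 pitch = ALIGN(width * cpp, 64);

        /* pad to a page so an over-wide fb can still be remapped */
        if (pitch > max_linear_stride)
            pitch = ALIGN(pitch, 4096);

        return pitch;
    }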
@@ -2087,7 +2015,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
if (!err)
break;
- } while (flush_delayed_work(&dev_priv->gt.retire_work));
+ } while (flush_delayed_work(&dev_priv->gem.retire_work));
return err;
}
@@ -2143,8 +2071,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
}
/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+void __i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
i915_gem_object_free_mmap_offset(obj);
@@ -2161,28 +2088,6 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
obj->mm.pages = ERR_PTR(-EFAULT);
}
-/* Try to discard unwanted pages */
-void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
-{
- struct address_space *mapping;
-
- lockdep_assert_held(&obj->mm.lock);
- GEM_BUG_ON(i915_gem_object_has_pages(obj));
-
- switch (obj->mm.madv) {
- case I915_MADV_DONTNEED:
- i915_gem_object_truncate(obj);
- case __I915_MADV_PURGED:
- return;
- }
-
- if (obj->base.filp == NULL)
- return;
-
- mapping = obj->base.filp->f_mapping,
- invalidate_mapping_pages(mapping, 0, (loff_t)-1);
-}
-
/*
* Move pages to appropriate lru and release the pagevec, decrementing the
* ref count of those pages.
@@ -2870,132 +2775,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
-static void
-i915_gem_retire_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), gt.retire_work.work);
- struct drm_device *dev = &dev_priv->drm;
-
- /* Come back later if the device is busy... */
- if (mutex_trylock(&dev->struct_mutex)) {
- i915_retire_requests(dev_priv);
- mutex_unlock(&dev->struct_mutex);
- }
-
- /*
- * Keep the retire handler running until we are finally idle.
- * We do not need to do this test under locking as in the worst-case
- * we queue the retire worker once too often.
- */
- if (READ_ONCE(dev_priv->gt.awake))
- queue_delayed_work(dev_priv->wq,
- &dev_priv->gt.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
-static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
- unsigned long mask)
-{
- bool result = true;
-
- /*
- * Even if we fail to switch, give whatever is running a small chance
- * to save itself before we report the failure. Yes, this may be a
- * false positive due to e.g. ENOMEM, caveat emptor!
- */
- if (i915_gem_switch_to_kernel_context(i915, mask))
- result = false;
-
- if (i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- I915_GEM_IDLE_TIMEOUT))
- result = false;
-
- if (!result) {
- if (i915_modparams.reset) { /* XXX hide warning from gem_eio */
- dev_err(i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- }
-
- /* Forcibly cancel outstanding work and leave the gpu quiet. */
- i915_gem_set_wedged(i915);
- }
-
- i915_retire_requests(i915); /* ensure we flush after wedging */
- return result;
-}
-
-static bool load_power_context(struct drm_i915_private *i915)
-{
- /* Force loading the kernel context on all engines */
- if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
- return false;
-
- /*
- * Immediately park the GPU so that we enable powersaving and
- * treat it as idle. The next time we issue a request, we will
- * unpark and start using the engine->pinned_default_state, otherwise
- * it is in limbo and an early reset may fail.
- */
- __i915_gem_park(i915);
-
- return true;
-}
-
-static void
-i915_gem_idle_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gt.idle_work.work);
- bool rearm_hangcheck;
-
- if (!READ_ONCE(i915->gt.awake))
- return;
-
- if (READ_ONCE(i915->gt.active_requests))
- return;
-
- rearm_hangcheck =
- cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
-
- if (!mutex_trylock(&i915->drm.struct_mutex)) {
- /* Currently busy, come back later */
- mod_delayed_work(i915->wq,
- &i915->gt.idle_work,
- msecs_to_jiffies(50));
- goto out_rearm;
- }
-
- /*
- * Flush out the last user context, leaving only the pinned
- * kernel context resident. Should anything unfortunate happen
- * while we are idle (such as the GPU being power cycled), no users
- * will be harmed.
- */
- if (!work_pending(&i915->gt.idle_work.work) &&
- !i915->gt.active_requests) {
- ++i915->gt.active_requests; /* don't requeue idle */
-
- switch_to_kernel_context_sync(i915, i915->gt.active_engines);
-
- if (!--i915->gt.active_requests) {
- __i915_gem_park(i915);
- rearm_hangcheck = false;
- }
- }
-
- mutex_unlock(&i915->drm.struct_mutex);
-
-out_rearm:
- if (rearm_hangcheck) {
- GEM_BUG_ON(!i915->gt.awake);
- i915_queue_hangcheck(i915);
- }
-}
-
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(gem->dev);
@@ -3135,9 +2914,6 @@ wait_for_timelines(struct drm_i915_private *i915,
struct i915_gt_timelines *gt = &i915->gt.timelines;
struct i915_timeline *tl;
- if (!READ_ONCE(i915->gt.active_requests))
- return timeout;
-
mutex_lock(&gt->mutex);
list_for_each_entry(tl, &gt->active_list, link) {
struct i915_request *rq;
@@ -3177,9 +2953,10 @@ wait_for_timelines(struct drm_i915_private *i915,
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
- GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
+ GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
+ timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
+ yesno(i915->gt.awake));
/* If the device is asleep, we have no requests outstanding */
if (!READ_ONCE(i915->gt.awake))
@@ -4023,7 +3800,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
/* if the object is no longer attached, discard its backing storage */
if (obj->mm.madv == I915_MADV_DONTNEED &&
!i915_gem_object_has_pages(obj))
- i915_gem_object_truncate(obj);
+ __i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED;
mutex_unlock(&obj->mm.lock);
@@ -4401,7 +4178,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- intel_engines_sanitize(i915, false);
+ intel_gt_sanitize(i915, false);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(i915, wakeref);
@@ -4411,133 +4188,6 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.struct_mutex);
}
-void i915_gem_suspend(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref;
-
- GEM_TRACE("\n");
-
- wakeref = intel_runtime_pm_get(i915);
-
- flush_workqueue(i915->wq);
-
- mutex_lock(&i915->drm.struct_mutex);
-
- /*
- * We have to flush all the executing contexts to main memory so
- * that they can saved in the hibernation image. To ensure the last
- * context image is coherent, we have to switch away from it. That
- * leaves the i915->kernel_context still active when
- * we actually suspend, and its image in memory may not match the GPU
- * state. Fortunately, the kernel_context is disposable and we do
- * not rely on its state.
- */
- switch_to_kernel_context_sync(i915, i915->gt.active_engines);
-
- mutex_unlock(&i915->drm.struct_mutex);
- i915_reset_flush(i915);
-
- drain_delayed_work(&i915->gt.retire_work);
-
- /*
- * As the idle_work is rearming if it detects a race, play safe and
- * repeat the flush until it is definitely idle.
- */
- drain_delayed_work(&i915->gt.idle_work);
-
- /*
- * Assert that we successfully flushed all the work and
- * reset the GPU back to its idle, low power state.
- */
- GEM_BUG_ON(i915->gt.awake);
-
- intel_uc_suspend(i915);
-
- intel_runtime_pm_put(i915, wakeref);
-}
-
-void i915_gem_suspend_late(struct drm_i915_private *i915)
-{
- struct drm_i915_gem_object *obj;
- struct list_head *phases[] = {
- &i915->mm.unbound_list,
- &i915->mm.bound_list,
- NULL
- }, **phase;
-
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode. And the only
- * known way to disable logical contexts is through a GPU reset.
- *
- * So in order to leave the system in a known default configuration,
- * always reset the GPU upon unload and suspend. Afterwards we then
- * clean up the GEM state tracking, flushing off the requests and
- * leaving the system in a known idle state.
- *
- * Note that is of the upmost importance that the GPU is idle and
- * all stray writes are flushed *before* we dismantle the backing
- * storage for the pinned objects.
- *
- * However, since we are uncertain that resetting the GPU on older
- * machines is a good idea, we don't - just in case it leaves the
- * machine in an unusable condition.
- */
-
- mutex_lock(&i915->drm.struct_mutex);
- for (phase = phases; *phase; phase++) {
- list_for_each_entry(obj, *phase, mm.link)
- WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
- }
- mutex_unlock(&i915->drm.struct_mutex);
-
- intel_uc_sanitize(i915);
- i915_gem_sanitize(i915);
-}
-
-void i915_gem_resume(struct drm_i915_private *i915)
-{
- GEM_TRACE("\n");
-
- WARN_ON(i915->gt.awake);
-
- mutex_lock(&i915->drm.struct_mutex);
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
- i915_gem_restore_gtt_mappings(i915);
- i915_gem_restore_fences(i915);
-
- /*
- * As we didn't flush the kernel context before suspend, we cannot
- * guarantee that the context image is complete. So let's just reset
- * it and start again.
- */
- intel_gt_resume(i915);
-
- if (i915_gem_init_hw(i915))
- goto err_wedged;
-
- intel_uc_resume(i915);
-
- /* Always reload a context for powersaving. */
- if (!load_power_context(i915))
- goto err_wedged;
-
-out_unlock:
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- mutex_unlock(&i915->drm.struct_mutex);
- return;
-
-err_wedged:
- if (!i915_reset_failed(i915)) {
- dev_err(i915->drm.dev,
- "Failed to re-initialize GPU, declaring it wedged!\n");
- i915_gem_set_wedged(i915);
- }
- goto out_unlock;
-}
-
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) < 5 ||
@@ -4586,27 +4236,6 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
}
}
-static int __i915_gem_restart_engines(void *data)
-{
- struct drm_i915_private *i915 = data;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- for_each_engine(engine, i915, id) {
- err = engine->init_hw(engine);
- if (err) {
- DRM_ERROR("Failed to restart %s (%d)\n",
- engine->name, err);
- return err;
- }
- }
-
- intel_engines_set_scheduler_caps(i915);
-
- return 0;
-}
-
int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
int ret;
@@ -4665,12 +4294,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
intel_mocs_init_l3cc_table(dev_priv);
/* Only when the HW is re-initialised, can we replay the requests */
- ret = __i915_gem_restart_engines(dev_priv);
+ ret = intel_engines_resume(dev_priv);
if (ret)
goto cleanup_uc;
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_engines_set_scheduler_caps(dev_priv);
return 0;
cleanup_uc:
@@ -4683,8 +4313,9 @@ out:
static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ struct i915_gem_engines *e;
enum intel_engine_id id;
int err = 0;
@@ -4701,18 +4332,21 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ e = i915_gem_context_lock_engines(ctx);
+
for_each_engine(engine, i915, id) {
+ struct intel_context *ce = e->engines[id];
struct i915_request *rq;
- rq = i915_request_alloc(engine, ctx);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_ctx;
+ goto err_active;
}
err = 0;
- if (engine->init_context)
- err = engine->init_context(rq);
+ if (rq->engine->init_context)
+ err = rq->engine->init_context(rq);
i915_request_add(rq);
if (err)
@@ -4720,21 +4354,16 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
}
/* Flush the default context image to memory, and enable powersaving. */
- if (!load_power_context(i915)) {
+ if (!i915_gem_load_power_context(i915)) {
err = -EIO;
goto err_active;
}
for_each_engine(engine, i915, id) {
- struct intel_context *ce;
- struct i915_vma *state;
+ struct intel_context *ce = e->engines[id];
+ struct i915_vma *state = ce->state;
void *vaddr;
- ce = intel_context_lookup(ctx, engine);
- if (!ce)
- continue;
-
- state = ce->state;
if (!state)
continue;
@@ -4790,6 +4419,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
}
out_ctx:
+ i915_gem_context_unlock_engines(ctx);
i915_gem_context_set_closed(ctx);
i915_gem_context_put(ctx);
return err;
@@ -4842,6 +4472,23 @@ static void i915_gem_fini_scratch(struct drm_i915_private *i915)
i915_vma_unpin_and_release(&i915->gt.scratch, 0);
}
+static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return 0;
+
+ for_each_engine(engine, i915, id) {
+ if (intel_engine_verify_workarounds(engine, "load"))
+ err = -EIO;
+ }
+
+ return err;
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -4853,11 +4500,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
- dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
- else
- dev_priv->gt.cleanup_engine = intel_engine_cleanup;
-
i915_timelines_init(dev_priv);
ret = i915_gem_init_userptr(dev_priv);
@@ -4894,6 +4536,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_ggtt;
}
+ ret = intel_engines_setup(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_unlock;
+ }
+
ret = i915_gem_contexts_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
@@ -4927,6 +4575,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
+ ret = intel_engines_verify_workarounds(dev_priv);
+ if (ret)
+ goto err_init_hw;
+
ret = __intel_engines_record_defaults(dev_priv);
if (ret)
goto err_init_hw;
@@ -4955,6 +4607,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_init_hw:
mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_set_wedged(dev_priv);
i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
@@ -4967,7 +4620,7 @@ err_uc_init:
err_pm:
if (ret != -EIO) {
intel_cleanup_gt_powersave(dev_priv);
- i915_gem_cleanup_engines(dev_priv);
+ intel_engines_cleanup(dev_priv);
}
err_context:
if (ret != -EIO)
@@ -5016,6 +4669,8 @@ err_uc_misc:
void i915_gem_fini(struct drm_i915_private *dev_priv)
{
+ GEM_BUG_ON(dev_priv->gt.awake);
+
i915_gem_suspend_late(dev_priv);
intel_disable_gt_powersave(dev_priv);
@@ -5025,7 +4680,7 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
intel_uc_fini(dev_priv);
- i915_gem_cleanup_engines(dev_priv);
+ intel_engines_cleanup(dev_priv);
i915_gem_contexts_fini(dev_priv);
i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -5049,16 +4704,6 @@ void i915_gem_init_mmio(struct drm_i915_private *i915)
}
void
-i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- dev_priv->gt.cleanup_engine(engine);
-}
-
-void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
int i;
@@ -5110,15 +4755,14 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
int err;
+ intel_gt_pm_init(dev_priv);
+
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
i915_gem_init__mm(dev_priv);
+ i915_gem_init__pm(dev_priv);
- INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
- i915_gem_retire_work_handler);
- INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
- i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
mutex_init(&dev_priv->gpu_error.wedge_mutex);
@@ -5461,16 +5105,29 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
}
dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n)
+i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned int *len)
{
struct scatterlist *sg;
unsigned int offset;
sg = i915_gem_object_get_sg(obj, n, &offset);
+
+ if (len)
+ *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
+
return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ return i915_gem_object_get_dma_address_len(obj, n, NULL);
+}
+
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
struct sg_table *pages;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 9074eb1e843f..fe82d3571072 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -75,9 +75,6 @@ struct drm_i915_private;
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
-void i915_gem_park(struct drm_i915_private *i915);
-void i915_gem_unpark(struct drm_i915_private *i915);
-
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
@@ -94,4 +91,9 @@ static inline bool __tasklet_enable(struct tasklet_struct *t)
return atomic_dec_and_test(&t->count);
}
+static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
+{
+ return test_bit(TASKLET_STATE_SCHED, &t->state);
+}
+
#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index dd728b26b5aa..5d2f8ba92b59 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -86,16 +86,16 @@
*/
#include <linux/log2.h>
+#include <linux/nospec.h>
+
#include <drm/i915_drm.h>
+
+#include "gt/intel_lrc_reg.h"
+
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
-#include "intel_lrc_reg.h"
-#include "intel_workarounds.h"
-
-#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1 << 1)
-#define I915_CONTEXT_PARAM_VM 0x9
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
@@ -138,6 +138,34 @@ static void lut_close(struct i915_gem_context *ctx)
rcu_read_unlock();
}
+static struct intel_context *
+lookup_user_engine(struct i915_gem_context *ctx,
+ unsigned long flags,
+ const struct i915_engine_class_instance *ci)
+#define LOOKUP_USER_INDEX BIT(0)
+{
+ int idx;
+
+ if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
+ return ERR_PTR(-EINVAL);
+
+ if (!i915_gem_context_user_engines(ctx)) {
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(ctx->i915,
+ ci->engine_class,
+ ci->engine_instance);
+ if (!engine)
+ return ERR_PTR(-EINVAL);
+
+ idx = engine->id;
+ } else {
+ idx = ci->engine_instance;
+ }
+
+ return i915_gem_context_get_engine(ctx, idx);
+}
+
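One detail of lookup_user_engine() worth spelling out: index-based addressing is only valid when the context carries a user-defined engine map, and the !! double negation reduces that to a plain boolean comparison. The guard in isolation, as a hypothetical helper:

    static bool lookup_mode_matches(unsigned long flags, bool has_user_map)
    {
        /* LOOKUP_USER_INDEX must agree with the presence of a user map */
        return !!(flags & LOOKUP_USER_INDEX) == has_user_map;
    }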
static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
{
unsigned int max;
@@ -227,19 +255,70 @@ static void release_hw_id(struct i915_gem_context *ctx)
mutex_unlock(&i915->contexts.mutex);
}
-static void i915_gem_context_free(struct i915_gem_context *ctx)
+static void __free_engines(struct i915_gem_engines *e, unsigned int count)
+{
+ while (count--) {
+ if (!e->engines[count])
+ continue;
+
+ intel_context_put(e->engines[count]);
+ }
+ kfree(e);
+}
+
+static void free_engines(struct i915_gem_engines *e)
+{
+ __free_engines(e, e->num_engines);
+}
+
+static void free_engines_rcu(struct work_struct *wrk)
{
- struct intel_context *it, *n;
+ struct i915_gem_engines *e =
+ container_of(wrk, struct i915_gem_engines, rcu.work);
+ struct drm_i915_private *i915 = e->i915;
+ mutex_lock(&i915->drm.struct_mutex);
+ free_engines(e);
+ mutex_unlock(&i915->drm.struct_mutex);
+}
+
+static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
+{
+ struct intel_engine_cs *engine;
+ struct i915_gem_engines *e;
+ enum intel_engine_id id;
+
+ e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+
+ e->i915 = ctx->i915;
+ for_each_engine(engine, ctx->i915, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(ctx, engine);
+ if (IS_ERR(ce)) {
+ __free_engines(e, id);
+ return ERR_CAST(ce);
+ }
+
+ e->engines[id] = ce;
+ }
+ e->num_engines = id;
+
+ return e;
+}
+
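default_engines() sizes its allocation with struct_size() from linux/overflow.h, the overflow-checked idiom for structs ending in a flexible array member. A self-contained sketch of the pattern, with illustrative names:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct sketch_engines {
        unsigned int num_engines;
        void *engines[];    /* flexible array member */
    };

    static struct sketch_engines *sketch_alloc(unsigned int count)
    {
        struct sketch_engines *e;

        /* sizeof(*e) + count * sizeof(e->engines[0]), overflow-safe */
        e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
        if (!e)
            return NULL;

        e->num_engines = count;
        return e;
    }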
+static void i915_gem_context_free(struct i915_gem_context *ctx)
+{
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
- GEM_BUG_ON(!list_empty(&ctx->active_engines));
release_hw_id(ctx);
i915_ppgtt_put(ctx->ppgtt);
- rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
- intel_context_put(it);
+ free_engines(rcu_access_pointer(ctx->engines));
+ mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
i915_timeline_put(ctx->timeline);
@@ -348,6 +427,8 @@ static struct i915_gem_context *
__create_context(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
+ struct i915_gem_engines *e;
+ int err;
int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -358,11 +439,15 @@ __create_context(struct drm_i915_private *dev_priv)
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
- INIT_LIST_HEAD(&ctx->active_engines);
mutex_init(&ctx->mutex);
- ctx->hw_contexts = RB_ROOT;
- spin_lock_init(&ctx->hw_contexts_lock);
+ mutex_init(&ctx->engines_mutex);
+ e = default_engines(ctx);
+ if (IS_ERR(e)) {
+ err = PTR_ERR(e);
+ goto err_free;
+ }
+ RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
@@ -384,6 +469,10 @@ __create_context(struct drm_i915_private *dev_priv)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
return ctx;
+
+err_free:
+ kfree(ctx);
+ return ERR_PTR(err);
}
static struct i915_hw_ppgtt *
@@ -415,8 +504,6 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- BUILD_BUG_ON(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &
- ~I915_CONTEXT_CREATE_FLAGS_UNKNOWN);
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
!HAS_EXECLISTS(dev_priv))
return ERR_PTR(-EINVAL);
@@ -769,8 +856,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (err < 0)
goto err_unlock;
- GEM_BUG_ON(err == 0); /* reserved for default/unassigned ppgtt */
- ppgtt->user_handle = err;
+ GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
mutex_unlock(&file_priv->vm_idr_lock);
@@ -808,10 +894,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
return err;
ppgtt = idr_remove(&file_priv->vm_idr, id);
- if (ppgtt) {
- GEM_BUG_ON(ppgtt->user_handle != id);
- ppgtt->user_handle = 0;
- }
mutex_unlock(&file_priv->vm_idr_lock);
if (!ppgtt)
@@ -821,26 +903,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static struct i915_request *
-last_request_on_engine(struct i915_timeline *timeline,
- struct intel_engine_cs *engine)
-{
- struct i915_request *rq;
-
- GEM_BUG_ON(timeline == &engine->timeline);
-
- rq = i915_active_request_raw(&timeline->last_request,
- &engine->i915->drm.struct_mutex);
- if (rq && rq->engine->mask & engine->mask) {
- GEM_TRACE("last request on engine %s: %llx:%llu\n",
- engine->name, rq->fence.context, rq->fence.seqno);
- GEM_BUG_ON(rq->timeline != timeline);
- return rq;
- }
-
- return NULL;
-}
-
struct context_barrier_task {
struct i915_active base;
void (*task)(void *data);
@@ -867,8 +929,8 @@ static int context_barrier_task(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct context_barrier_task *cb;
- struct intel_context *ce, *next;
- intel_wakeref_t wakeref;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
int err = 0;
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -881,21 +943,19 @@ static int context_barrier_task(struct i915_gem_context *ctx,
i915_active_init(i915, &cb->base, cb_retire);
i915_active_acquire(&cb->base);
- wakeref = intel_runtime_pm_get(i915);
- rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) {
- struct intel_engine_cs *engine = ce->engine;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
- if (!(engine->mask & engines))
- continue;
-
if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
- engine->mask)) {
+ ce->engine->mask)) {
err = -ENXIO;
break;
}
- rq = i915_request_alloc(engine, ctx);
+ if (!(ce->engine->mask & engines) || !ce->state)
+ continue;
+
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -911,7 +971,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (err)
break;
}
- intel_runtime_pm_put(i915, wakeref);
+ i915_gem_context_unlock_engines(ctx);
cb->task = err ? NULL : task; /* caller needs to unwind instead */
cb->data = data;
@@ -921,54 +981,6 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
- intel_engine_mask_t mask)
-{
- struct intel_engine_cs *engine;
-
- GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
-
- lockdep_assert_held(&i915->drm.struct_mutex);
- GEM_BUG_ON(!i915->kernel_context);
-
- /* Inoperable, so presume the GPU is safely pointing into the void! */
- if (i915_terminally_wedged(i915))
- return 0;
-
- for_each_engine_masked(engine, i915, mask, mask) {
- struct intel_ring *ring;
- struct i915_request *rq;
-
- rq = i915_request_alloc(engine, i915->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- /* Queue this switch after all other activity */
- list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
- struct i915_request *prev;
-
- prev = last_request_on_engine(ring->timeline, engine);
- if (!prev)
- continue;
-
- if (prev->gem_context == i915->kernel_context)
- continue;
-
- GEM_TRACE("add barrier on %s for %llx:%lld\n",
- engine->name,
- prev->fence.context,
- prev->fence.seqno);
- i915_sw_fence_await_sw_fence_gfp(&rq->submit,
- &prev->submit,
- I915_FENCE_GFP);
- }
-
- i915_request_add(rq);
- }
-
- return 0;
-}
-
static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -976,8 +988,6 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_hw_ppgtt *ppgtt;
int ret;
- return -EINVAL; /* nothing to see here; please move along */
-
if (!ctx->ppgtt)
return -ENODEV;
@@ -993,18 +1003,15 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (ret)
goto err_put;
- if (!ppgtt->user_handle) {
- ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
- GEM_BUG_ON(!ret);
- if (ret < 0)
- goto err_unlock;
+ ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+ GEM_BUG_ON(!ret);
+ if (ret < 0)
+ goto err_unlock;
- ppgtt->user_handle = ret;
- i915_ppgtt_get(ppgtt);
- }
+ i915_ppgtt_get(ppgtt);
args->size = 0;
- args->value = ppgtt->user_handle;
+ args->value = ret;
ret = 0;
err_unlock:
@@ -1079,8 +1086,6 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_hw_ppgtt *ppgtt, *old;
int err;
- return -EINVAL; /* nothing to see here; please move along */
-
if (args->size)
return -EINVAL;
@@ -1095,10 +1100,8 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
return err;
ppgtt = idr_find(&file_priv->vm_idr, args->value);
- if (ppgtt) {
- GEM_BUG_ON(ppgtt->user_handle != args->value);
+ if (ppgtt)
i915_ppgtt_get(ppgtt);
- }
mutex_unlock(&file_priv->vm_idr_lock);
if (!ppgtt)
return -ENOENT;
@@ -1156,7 +1159,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
- *cs++ = gen8_make_rpcs(rq->i915, &sseu);
+ *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
intel_ring_advance(rq, cs);
@@ -1166,9 +1169,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
static int
gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
{
- struct drm_i915_private *i915 = ce->engine->i915;
struct i915_request *rq;
- intel_wakeref_t wakeref;
int ret;
lockdep_assert_held(&ce->pin_mutex);
@@ -1182,14 +1183,9 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
if (!intel_context_is_pinned(ce))
return 0;
- /* Submitting requests etc needs the hw awake. */
- wakeref = intel_runtime_pm_get(i915);
-
- rq = i915_request_alloc(ce->engine, i915->kernel_context);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- goto out_put;
- }
+ rq = i915_request_create(ce->engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
/* Queue this switch after all other activity by this context. */
ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
@@ -1213,26 +1209,20 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
out_add:
i915_request_add(rq);
-out_put:
- intel_runtime_pm_put(i915, wakeref);
-
return ret;
}
static int
-__i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_sseu sseu)
+__intel_context_reconfigure_sseu(struct intel_context *ce,
+ struct intel_sseu sseu)
{
- struct intel_context *ce;
- int ret = 0;
+ int ret;
- GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
- GEM_BUG_ON(engine->id != RCS0);
+ GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
- ce = intel_context_pin_lock(ctx, engine);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
+ ret = intel_context_lock_pinned(ce);
+ if (ret)
+ return ret;
/* Nothing to do if unmodified. */
if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
@@ -1243,24 +1233,23 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
ce->sseu = sseu;
unlock:
- intel_context_pin_unlock(ce);
+ intel_context_unlock_pinned(ce);
return ret;
}
static int
-i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_sseu sseu)
+intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
+ struct drm_i915_private *i915 = ce->gem_context->i915;
int ret;
- ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
- ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+ ret = __intel_context_reconfigure_sseu(ce, sseu);
- mutex_unlock(&ctx->i915->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1368,8 +1357,9 @@ static int set_sseu(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_context_param_sseu user_sseu;
- struct intel_engine_cs *engine;
+ struct intel_context *ce;
struct intel_sseu sseu;
+ unsigned long lookup;
int ret;
if (args->size < sizeof(user_sseu))
@@ -1382,32 +1372,429 @@ static int set_sseu(struct i915_gem_context *ctx,
sizeof(user_sseu)))
return -EFAULT;
- if (user_sseu.flags || user_sseu.rsvd)
+ if (user_sseu.rsvd)
return -EINVAL;
- engine = intel_engine_lookup_user(i915,
- user_sseu.engine.engine_class,
- user_sseu.engine.engine_instance);
- if (!engine)
+ if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
return -EINVAL;
+ lookup = 0;
+ if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
+ lookup |= LOOKUP_USER_INDEX;
+
+ ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
/* Only render engine supports RPCS configuration. */
- if (engine->class != RENDER_CLASS)
- return -ENODEV;
+ if (ce->engine->class != RENDER_CLASS) {
+ ret = -ENODEV;
+ goto out_ce;
+ }
ret = user_to_context_sseu(i915, &user_sseu, &sseu);
if (ret)
- return ret;
+ goto out_ce;
- ret = i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+ ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
- return ret;
+ goto out_ce;
args->size = sizeof(user_sseu);
+out_ce:
+ intel_context_put(ce);
+ return ret;
+}
+
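With the new I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX flag, userspace can address an engine by its slot in the map supplied through I915_CONTEXT_PARAM_ENGINES instead of by class/instance. A hedged userspace sketch (in index mode engine_instance carries the slot number; headers and error handling elided):

    struct drm_i915_gem_context_param_sseu sseu = {
        .flags = I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX,
        /* index mode: engine_instance is the slot in the engine map */
        .engine = { .engine_instance = 2 },
    };
    struct drm_i915_gem_context_param arg = {
        .ctx_id = ctx_id,
        .param = I915_CONTEXT_PARAM_SSEU,
        .size = sizeof(sseu),
        .value = (uintptr_t)&sseu,
    };

    ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);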
+struct set_engines {
+ struct i915_gem_context *ctx;
+ struct i915_gem_engines *engines;
+};
+
+static int
+set_engines__load_balance(struct i915_user_extension __user *base, void *data)
+{
+ struct i915_context_engines_load_balance __user *ext =
+ container_of_user(base, typeof(*ext), base);
+ const struct set_engines *set = data;
+ struct intel_engine_cs *stack[16];
+ struct intel_engine_cs **siblings;
+ struct intel_context *ce;
+ u16 num_siblings, idx;
+ unsigned int n;
+ int err;
+
+ if (!HAS_EXECLISTS(set->ctx->i915))
+ return -ENODEV;
+
+ if (USES_GUC_SUBMISSION(set->ctx->i915))
+ return -ENODEV; /* not implemented yet */
+
+ if (get_user(idx, &ext->engine_index))
+ return -EFAULT;
+
+ if (idx >= set->engines->num_engines) {
+ DRM_DEBUG("Invalid placement value, %d >= %d\n",
+ idx, set->engines->num_engines);
+ return -EINVAL;
+ }
+
+ idx = array_index_nospec(idx, set->engines->num_engines);
+ if (set->engines->engines[idx]) {
+ DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
+ return -EEXIST;
+ }
+
+ if (get_user(num_siblings, &ext->num_siblings))
+ return -EFAULT;
+
+ err = check_user_mbz(&ext->flags);
+ if (err)
+ return err;
+
+ err = check_user_mbz(&ext->mbz64);
+ if (err)
+ return err;
+
+ siblings = stack;
+ if (num_siblings > ARRAY_SIZE(stack)) {
+ siblings = kmalloc_array(num_siblings,
+ sizeof(*siblings),
+ GFP_KERNEL);
+ if (!siblings)
+ return -ENOMEM;
+ }
+
+ for (n = 0; n < num_siblings; n++) {
+ struct i915_engine_class_instance ci;
+
+ if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
+ err = -EFAULT;
+ goto out_siblings;
+ }
+
+ siblings[n] = intel_engine_lookup_user(set->ctx->i915,
+ ci.engine_class,
+ ci.engine_instance);
+ if (!siblings[n]) {
+ DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ err = -EINVAL;
+ goto out_siblings;
+ }
+ }
+
+ ce = intel_execlists_create_virtual(set->ctx, siblings, n);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_siblings;
+ }
+
+ if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
+ intel_context_put(ce);
+ err = -EEXIST;
+ goto out_siblings;
+ }
+
+out_siblings:
+ if (siblings != stack)
+ kfree(siblings);
+
+ return err;
+}
+
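set_engines__load_balance() keeps sibling lookups on a small stack array and only falls back to kmalloc_array() for oversized lists, a common pattern when large inputs are rare. In isolation, with illustrative names:

    static int with_scratch(unsigned int n)
    {
        u32 stack[16];
        u32 *buf = stack;
        int err = 0;

        if (n > ARRAY_SIZE(stack)) {
            buf = kmalloc_array(n, sizeof(*buf), GFP_KERNEL);
            if (!buf)
                return -ENOMEM;
        }

        /* ... use buf[0..n) ... */

        if (buf != stack)
            kfree(buf);

        return err;
    }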
+static int
+set_engines__bond(struct i915_user_extension __user *base, void *data)
+{
+ struct i915_context_engines_bond __user *ext =
+ container_of_user(base, typeof(*ext), base);
+ const struct set_engines *set = data;
+ struct i915_engine_class_instance ci;
+ struct intel_engine_cs *virtual;
+ struct intel_engine_cs *master;
+ u16 idx, num_bonds;
+ int err, n;
+
+ if (get_user(idx, &ext->virtual_index))
+ return -EFAULT;
+
+ if (idx >= set->engines->num_engines) {
+ DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
+ idx, set->engines->num_engines);
+ return -EINVAL;
+ }
+
+ idx = array_index_nospec(idx, set->engines->num_engines);
+ if (!set->engines->engines[idx]) {
+ DRM_DEBUG("Invalid engine at %d\n", idx);
+ return -EINVAL;
+ }
+ virtual = set->engines->engines[idx]->engine;
+
+ err = check_user_mbz(&ext->flags);
+ if (err)
+ return err;
+
+ for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
+ err = check_user_mbz(&ext->mbz64[n]);
+ if (err)
+ return err;
+ }
+
+ if (copy_from_user(&ci, &ext->master, sizeof(ci)))
+ return -EFAULT;
+
+ master = intel_engine_lookup_user(set->ctx->i915,
+ ci.engine_class, ci.engine_instance);
+ if (!master) {
+ DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
+ ci.engine_class, ci.engine_instance);
+ return -EINVAL;
+ }
+
+ if (get_user(num_bonds, &ext->num_bonds))
+ return -EFAULT;
+
+ for (n = 0; n < num_bonds; n++) {
+ struct intel_engine_cs *bond;
+
+ if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
+ return -EFAULT;
+
+ bond = intel_engine_lookup_user(set->ctx->i915,
+ ci.engine_class,
+ ci.engine_instance);
+ if (!bond) {
+ DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ return -EINVAL;
+ }
+
+ /*
+ * A non-virtual engine has no siblings to choose between; and
+ * a submit fence will always be directed to the one engine.
+ */
+ if (intel_engine_is_virtual(virtual)) {
+ err = intel_virtual_engine_attach_bond(virtual,
+ master,
+ bond);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const i915_user_extension_fn set_engines__extensions[] = {
+ [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
+ [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
+};
+
+static int
+set_engines(struct i915_gem_context *ctx,
+ const struct drm_i915_gem_context_param *args)
+{
+ struct i915_context_param_engines __user *user =
+ u64_to_user_ptr(args->value);
+ struct set_engines set = { .ctx = ctx };
+ unsigned int num_engines, n;
+ u64 extensions;
+ int err;
+
+ if (!args->size) { /* switch back to legacy user_ring_map */
+ if (!i915_gem_context_user_engines(ctx))
+ return 0;
+
+ set.engines = default_engines(ctx);
+ if (IS_ERR(set.engines))
+ return PTR_ERR(set.engines);
+
+ goto replace;
+ }
+
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
+ if (args->size < sizeof(*user) ||
+ !IS_ALIGNED(args->size, sizeof(*user->engines))) {
+ DRM_DEBUG("Invalid size for engine array: %d\n",
+ args->size);
+ return -EINVAL;
+ }
+
+ /*
+ * Note that I915_EXEC_RING_MASK limits execbuf to only using the
+ * first 64 engines defined here.
+ */
+ num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
+
+ set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
+ GFP_KERNEL);
+ if (!set.engines)
+ return -ENOMEM;
+
+ set.engines->i915 = ctx->i915;
+ for (n = 0; n < num_engines; n++) {
+ struct i915_engine_class_instance ci;
+ struct intel_engine_cs *engine;
+
+ if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
+ __free_engines(set.engines, n);
+ return -EFAULT;
+ }
+
+ if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
+ ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
+ set.engines->engines[n] = NULL;
+ continue;
+ }
+
+ engine = intel_engine_lookup_user(ctx->i915,
+ ci.engine_class,
+ ci.engine_instance);
+ if (!engine) {
+ DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ __free_engines(set.engines, n);
+ return -ENOENT;
+ }
+
+ set.engines->engines[n] = intel_context_create(ctx, engine);
+ if (IS_ERR(set.engines->engines[n])) {
+ err = PTR_ERR(set.engines->engines[n]);
+ __free_engines(set.engines, n);
+ return err;
+ }
+ }
+ set.engines->num_engines = num_engines;
+
+ err = -EFAULT;
+ if (!get_user(extensions, &user->extensions))
+ err = i915_user_extensions(u64_to_user_ptr(extensions),
+ set_engines__extensions,
+ ARRAY_SIZE(set_engines__extensions),
+ &set);
+ if (err) {
+ free_engines(set.engines);
+ return err;
+ }
+
+replace:
+ mutex_lock(&ctx->engines_mutex);
+ if (args->size)
+ i915_gem_context_set_user_engines(ctx);
+ else
+ i915_gem_context_clear_user_engines(ctx);
+ rcu_swap_protected(ctx->engines, set.engines, 1);
+ mutex_unlock(&ctx->engines_mutex);
+
+ INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu);
+ queue_rcu_work(system_wq, &set.engines->rcu);
+
return 0;
}
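The replace path above is the usual RCU publish-then-defer-free shape: swap the pointer under engines_mutex (the patch does this in one step with rcu_swap_protected()), then let queue_rcu_work() free the old map after a grace period. Boiled down with illustrative names:

    struct table {
        struct rcu_work rcu;
        /* ... payload ... */
    };

    static void table_free_wrk(struct work_struct *wrk)
    {
        struct table *t = container_of(wrk, struct table, rcu.work);

        kfree(t);
    }

    static void table_publish(struct table __rcu **slot, struct table *new,
                              struct mutex *lock)
    {
        struct table *old;

        mutex_lock(lock);
        old = rcu_dereference_protected(*slot, lockdep_is_held(lock));
        rcu_assign_pointer(*slot, new);
        mutex_unlock(lock);

        INIT_RCU_WORK(&old->rcu, table_free_wrk);
        queue_rcu_work(system_wq, &old->rcu);
    }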
+static struct i915_gem_engines *
+__copy_engines(struct i915_gem_engines *e)
+{
+ struct i915_gem_engines *copy;
+ unsigned int n;
+
+ copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ if (!copy)
+ return ERR_PTR(-ENOMEM);
+
+ copy->i915 = e->i915;
+ for (n = 0; n < e->num_engines; n++) {
+ if (e->engines[n])
+ copy->engines[n] = intel_context_get(e->engines[n]);
+ else
+ copy->engines[n] = NULL;
+ }
+ copy->num_engines = n;
+
+ return copy;
+}
+
+static int
+get_engines(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ struct i915_context_param_engines __user *user;
+ struct i915_gem_engines *e;
+ size_t n, count, size;
+ int err = 0;
+
+ err = mutex_lock_interruptible(&ctx->engines_mutex);
+ if (err)
+ return err;
+
+ e = NULL;
+ if (i915_gem_context_user_engines(ctx))
+ e = __copy_engines(i915_gem_context_engines(ctx));
+ mutex_unlock(&ctx->engines_mutex);
+ if (IS_ERR_OR_NULL(e)) {
+ args->size = 0;
+ return PTR_ERR_OR_ZERO(e);
+ }
+
+ count = e->num_engines;
+
+ /* Be paranoid in case we have an impedance mismatch */
+ if (!check_struct_size(user, engines, count, &size)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+ if (overflows_type(size, args->size)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (!args->size) {
+ args->size = size;
+ goto err_free;
+ }
+
+ if (args->size < size) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ user = u64_to_user_ptr(args->value);
+ if (!access_ok(user, size)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (put_user(0, &user->extensions)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ for (n = 0; n < count; n++) {
+ struct i915_engine_class_instance ci = {
+ .engine_class = I915_ENGINE_CLASS_INVALID,
+ .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
+ };
+
+ if (e->engines[n]) {
+ ci.engine_class = e->engines[n]->engine->uabi_class;
+ ci.engine_instance = e->engines[n]->engine->instance;
+ }
+
+ if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
+ err = -EFAULT;
+ goto err_free;
+ }
+ }
+
+ args->size = size;
+
+err_free:
+ INIT_RCU_WORK(&e->rcu, free_engines_rcu);
+ queue_rcu_work(system_wq, &e->rcu);
+ return err;
+}
+
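get_engines() follows the standard two-pass size-query contract: a call with size == 0 reports the space needed, a second call with a large-enough buffer fills it in. From the userspace side, roughly (struct names from the i915 uapi; includes and error handling elided):

    struct drm_i915_gem_context_param p = {
        .ctx_id = ctx_id,
        .param = I915_CONTEXT_PARAM_ENGINES,
    };
    struct i915_context_param_engines *map;

    ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p); /* p.size = needed */
    map = calloc(1, p.size);
    p.value = (uintptr_t)map;
    ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p); /* fills map */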
static int ctx_setparam(struct drm_i915_file_private *fpriv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1481,6 +1868,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_ppgtt(fpriv, ctx, args);
break;
+ case I915_CONTEXT_PARAM_ENGINES:
+ ret = set_engines(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1509,8 +1900,229 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}
+static int clone_engines(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
+ struct i915_gem_engines *clone;
+ bool user_engines;
+ unsigned long n;
+
+ clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ if (!clone)
+ goto err_unlock;
+
+ clone->i915 = dst->i915;
+ for (n = 0; n < e->num_engines; n++) {
+ struct intel_engine_cs *engine;
+
+ if (!e->engines[n]) {
+ clone->engines[n] = NULL;
+ continue;
+ }
+ engine = e->engines[n]->engine;
+
+ /*
+ * Virtual engines are singletons; they can only exist
+ * inside a single context, because they embed their
+ * HW context... As each virtual context implies a single
+ * timeline (each engine can only dequeue a single request
+ * at any time), it would be surprising for two contexts
+ * to use the same engine. So let's create a copy of
+ * the virtual engine instead.
+ */
+ if (intel_engine_is_virtual(engine))
+ clone->engines[n] =
+ intel_execlists_clone_virtual(dst, engine);
+ else
+ clone->engines[n] = intel_context_create(dst, engine);
+ if (IS_ERR_OR_NULL(clone->engines[n])) {
+ __free_engines(clone, n);
+ goto err_unlock;
+ }
+ }
+ clone->num_engines = n;
+
+ user_engines = i915_gem_context_user_engines(src);
+ i915_gem_context_unlock_engines(src);
+
+ free_engines(dst->engines);
+ RCU_INIT_POINTER(dst->engines, clone);
+ if (user_engines)
+ i915_gem_context_set_user_engines(dst);
+ else
+ i915_gem_context_clear_user_engines(dst);
+ return 0;
+
+err_unlock:
+ i915_gem_context_unlock_engines(src);
+ return -ENOMEM;
+}
+
+static int clone_flags(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ dst->user_flags = src->user_flags;
+ return 0;
+}
+
+static int clone_schedattr(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ dst->sched = src->sched;
+ return 0;
+}
+
+static int clone_sseu(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
+ struct i915_gem_engines *clone;
+ unsigned long n;
+ int err;
+
+ clone = dst->engines; /* no locking required; sole access */
+ if (e->num_engines != clone->num_engines) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ for (n = 0; n < e->num_engines; n++) {
+ struct intel_context *ce = e->engines[n];
+
+ if (clone->engines[n]->engine->class != ce->engine->class) {
+ /* Must have compatible engine maps! */
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ /* serialises with set_sseu */
+ err = intel_context_lock_pinned(ce);
+ if (err)
+ goto unlock;
+
+ clone->engines[n]->sseu = ce->sseu;
+ intel_context_unlock_pinned(ce);
+ }
+
+ err = 0;
+unlock:
+ i915_gem_context_unlock_engines(src);
+ return err;
+}
+
+static int clone_timeline(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ if (src->timeline) {
+ GEM_BUG_ON(src->timeline == dst->timeline);
+
+ if (dst->timeline)
+ i915_timeline_put(dst->timeline);
+ dst->timeline = i915_timeline_get(src->timeline);
+ }
+
+ return 0;
+}
+
+static int clone_vm(struct i915_gem_context *dst,
+ struct i915_gem_context *src)
+{
+ struct i915_hw_ppgtt *ppgtt;
+
+ rcu_read_lock();
+ do {
+ ppgtt = READ_ONCE(src->ppgtt);
+ if (!ppgtt)
+ break;
+
+ if (!kref_get_unless_zero(&ppgtt->ref))
+ continue;
+
+ /*
+ * This ppgtt may have been reallocated between
+ * the read and the kref, and reassigned to a third
+ * context. In order to avoid inadvertent sharing
+ * of this ppgtt with that third context (and not
+ * src), we have to confirm that we have the same
+ * ppgtt after passing through the strong memory
+ * barrier implied by a successful
+ * kref_get_unless_zero().
+ *
+ * Once we have acquired the current ppgtt of src,
+ * we no longer care if it is released from src, as
+ * it cannot be reallocated elsewhere.
+ */
+
+ if (ppgtt == READ_ONCE(src->ppgtt))
+ break;
+
+ i915_ppgtt_put(ppgtt);
+ } while (1);
+ rcu_read_unlock();
+
+ if (ppgtt) {
+ __assign_ppgtt(dst, ppgtt);
+ i915_ppgtt_put(ppgtt);
+ }
+
+ return 0;
+}
+
+static int create_clone(struct i915_user_extension __user *ext, void *data)
+{
+ static int (* const fn[])(struct i915_gem_context *dst,
+ struct i915_gem_context *src) = {
+#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
+ MAP(ENGINES, clone_engines),
+ MAP(FLAGS, clone_flags),
+ MAP(SCHEDATTR, clone_schedattr),
+ MAP(SSEU, clone_sseu),
+ MAP(TIMELINE, clone_timeline),
+ MAP(VM, clone_vm),
+#undef MAP
+ };
+ struct drm_i915_gem_context_create_ext_clone local;
+ const struct create_ext *arg = data;
+ struct i915_gem_context *dst = arg->ctx;
+ struct i915_gem_context *src;
+ int err, bit;
+
+ if (copy_from_user(&local, ext, sizeof(local)))
+ return -EFAULT;
+
+ BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
+ I915_CONTEXT_CLONE_UNKNOWN);
+
+ if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
+ return -EINVAL;
+
+ if (local.rsvd)
+ return -EINVAL;
+
+ rcu_read_lock();
+ src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
+ rcu_read_unlock();
+ if (!src)
+ return -ENOENT;
+
+ GEM_BUG_ON(src == dst);
+
+ for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
+ if (!(local.flags & BIT(bit)))
+ continue;
+
+ err = fn[bit](dst, src);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
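+
+/*
+ * Illustrative sketch (assumes the create_ext_clone uAPI added in this
+ * series; src_ctx_id is the caller's source context handle): cloning
+ * the engine map and VM from an existing context at create time, with
+ * rsvd left zero as required above:
+ *
+ *   struct drm_i915_gem_context_create_ext_clone clone = {
+ *           .base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
+ *           .clone_id = src_ctx_id,
+ *           .flags = I915_CONTEXT_CLONE_ENGINES | I915_CONTEXT_CLONE_VM,
+ *   };
+ */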
+
static const i915_user_extension_fn create_extensions[] = {
[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
+ [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};
static bool client_is_banned(struct drm_i915_file_private *file_priv)
@@ -1610,8 +2222,9 @@ static int get_sseu(struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
struct drm_i915_gem_context_param_sseu user_sseu;
- struct intel_engine_cs *engine;
struct intel_context *ce;
+ unsigned long lookup;
+ int err;
if (args->size == 0)
goto out;
@@ -1622,25 +2235,33 @@ static int get_sseu(struct i915_gem_context *ctx,
sizeof(user_sseu)))
return -EFAULT;
- if (user_sseu.flags || user_sseu.rsvd)
+ if (user_sseu.rsvd)
return -EINVAL;
- engine = intel_engine_lookup_user(ctx->i915,
- user_sseu.engine.engine_class,
- user_sseu.engine.engine_instance);
- if (!engine)
+ if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
return -EINVAL;
- ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
+ lookup = 0;
+ if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
+ lookup |= LOOKUP_USER_INDEX;
+
+ ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
+ if (err) {
+ intel_context_put(ce);
+ return err;
+ }
+
user_sseu.slice_mask = ce->sseu.slice_mask;
user_sseu.subslice_mask = ce->sseu.subslice_mask;
user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
- intel_context_pin_unlock(ce);
+ intel_context_unlock_pinned(ce);
+ intel_context_put(ce);
if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
sizeof(user_sseu)))
@@ -1708,6 +2329,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
ret = get_ppgtt(file_priv, ctx, args);
break;
+ case I915_CONTEXT_PARAM_ENGINES:
+ ret = get_engines(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1801,6 +2426,23 @@ out_unlock:
return err;
}
+/* GEM context-engines iterator: for_each_gem_engine() */
+struct intel_context *
+i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
+{
+ const struct i915_gem_engines *e = it->engines;
+ struct intel_context *ctx;
+
+ do {
+ if (it->idx >= e->num_engines)
+ return NULL;
+
+ ctx = e->engines[it->idx++];
+ } while (!ctx);
+
+ return ctx;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 23dcb01bfd82..9ad4a6362438 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -27,9 +27,10 @@
#include "i915_gem_context_types.h"
+#include "gt/intel_context.h"
+
#include "i915_gem.h"
#include "i915_scheduler.h"
-#include "intel_context.h"
#include "intel_device_info.h"
struct drm_device;
@@ -111,6 +112,24 @@ static inline void i915_gem_context_set_force_single_submission(struct i915_gem_
__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}
+static inline bool
+i915_gem_context_user_engines(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
+}
+
+static inline void
+i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
+{
+ set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
+}
+
+static inline void
+i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
+{
+ clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
+}
+
int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
{
@@ -140,10 +159,6 @@ int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);
-int i915_switch_context(struct i915_request *rq);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask);
-
void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);
@@ -179,6 +194,64 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
+static inline struct i915_gem_engines *
+i915_gem_context_engines(struct i915_gem_context *ctx)
+{
+ return rcu_dereference_protected(ctx->engines,
+ lockdep_is_held(&ctx->engines_mutex));
+}
+
+static inline struct i915_gem_engines *
+i915_gem_context_lock_engines(struct i915_gem_context *ctx)
+ __acquires(&ctx->engines_mutex)
+{
+ mutex_lock(&ctx->engines_mutex);
+ return i915_gem_context_engines(ctx);
+}
+
+static inline void
+i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
+ __releases(&ctx->engines_mutex)
+{
+ mutex_unlock(&ctx->engines_mutex);
+}
+
+static inline struct intel_context *
+i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx)
+{
+ return i915_gem_context_engines(ctx)->engines[idx];
+}
+
+static inline struct intel_context *
+i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
+{
+ struct intel_context *ce = ERR_PTR(-EINVAL);
+
+ rcu_read_lock(); {
+ struct i915_gem_engines *e = rcu_dereference(ctx->engines);
+ if (likely(idx < e->num_engines && e->engines[idx]))
+ ce = intel_context_get(e->engines[idx]);
+ } rcu_read_unlock();
+
+ return ce;
+}
+
+static inline void
+i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
+ struct i915_gem_engines *engines)
+{
+ GEM_BUG_ON(!engines);
+ it->engines = engines;
+ it->idx = 0;
+}
+
+struct intel_context *
+i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
+
+#define for_each_gem_engine(ce, engines, it) \
+ for (i915_gem_engines_iter_init(&(it), (engines)); \
+ ((ce) = i915_gem_engines_iter_next(&(it)));)
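+
+/*
+ * Example (mirrors the i915_perf.c usage later in this series):
+ *
+ *   struct i915_gem_engines_iter it;
+ *   struct intel_context *ce;
+ *
+ *   for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ *           ...
+ *   }
+ *   i915_gem_context_unlock_engines(ctx);
+ */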
+
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h
index e2ec58b10fb2..fb965ded2508 100644
--- a/drivers/gpu/drm/i915/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/i915_gem_context_types.h
@@ -17,8 +17,9 @@
#include <linux/rcupdate.h>
#include <linux/types.h>
+#include "gt/intel_context_types.h"
+
#include "i915_scheduler.h"
-#include "intel_context_types.h"
struct pid;
@@ -28,6 +29,18 @@ struct i915_hw_ppgtt;
struct i915_timeline;
struct intel_ring;
+struct i915_gem_engines {
+ struct rcu_work rcu;
+ struct drm_i915_private *i915;
+ unsigned int num_engines;
+ struct intel_context *engines[];
+};
+
+struct i915_gem_engines_iter {
+ unsigned int idx;
+ const struct i915_gem_engines *engines;
+};
+
/**
* struct i915_gem_context - client state
*
@@ -41,6 +54,30 @@ struct i915_gem_context {
/** file_priv: owning file descriptor */
struct drm_i915_file_private *file_priv;
+ /**
+ * @engines: User defined engines for this context
+ *
+ * Various uAPIs offer the ability to look up an
+ * index in this array to select an engine to operate on.
+ *
+ * Multiple logically distinct instances of the same engine
+ * may be defined in the array, as well as composite virtual
+ * engines.
+ *
+ * Execbuf uses the I915_EXEC_RING_MASK as an index into this
+ * array to select which HW context + engine to execute on. For
+ * the default array, the user_ring_map[] is used to translate
+ * the legacy uABI onto the appropriate index (e.g. both
+ * I915_EXEC_DEFAULT and I915_EXEC_RENDER select the same
+ * context, and I915_EXEC_BSD is weird). For a user defined
+ * array, execbuf uses I915_EXEC_RING_MASK as a plain index.
+ *
+ * User defined by I915_CONTEXT_PARAM_ENGINES (when the
+ * CONTEXT_USER_ENGINES flag is set).
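+ *
+ * For example (illustrative): with a user defined map of
+ * {rcs0, vcs0, vcs1}, an execbuf whose I915_EXEC_RING_MASK field
+ * is 2 is submitted to vcs1.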
+ */
+ struct i915_gem_engines __rcu *engines;
+ struct mutex engines_mutex; /* guards writes to engines */
+
struct i915_timeline *timeline;
/**
@@ -109,6 +146,7 @@ struct i915_gem_context {
#define CONTEXT_BANNED 0
#define CONTEXT_CLOSED 1
#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
+#define CONTEXT_USER_ENGINES 3
/**
* @hw_id: - unique identifier for the context
@@ -128,15 +166,10 @@ struct i915_gem_context {
atomic_t hw_id_pin_count;
struct list_head hw_id_link;
- struct list_head active_engines;
struct mutex mutex;
struct i915_sched_attr sched;
- /** hw_contexts: per-engine logical HW state */
- struct rb_root hw_contexts;
- spinlock_t hw_contexts_lock;
-
/** ring_size: size for allocating the per-engine ring buffer */
u32 ring_size;
/** desc_template: invariant fields for the HW context descriptor */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 060f5903544a..0bdb3e072ba5 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -36,15 +36,8 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1;
} igt_evict_ctl;)
-static bool ggtt_is_idle(struct drm_i915_private *i915)
-{
- return !i915->gt.active_requests;
-}
-
static int ggtt_flush(struct drm_i915_private *i915)
{
- int err;
-
/*
* Not everything in the GGTT is tracked via vma (otherwise we
* could evict as required with minimal stalling) so we are forced
@@ -52,19 +45,10 @@ static int ggtt_flush(struct drm_i915_private *i915)
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
- err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
- if (err)
- return err;
-
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- return err;
-
- GEM_BUG_ON(!ggtt_is_idle(i915));
- return 0;
+ return i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
}
static bool
@@ -222,24 +206,17 @@ search_again:
* us a termination condition, when the last retired context is
* the kernel's there is no more we can evict.
*/
- if (!ggtt_is_idle(dev_priv)) {
- if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
- return -EBUSY;
+ if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
+ return -EBUSY;
- ret = ggtt_flush(dev_priv);
- if (ret)
- return ret;
+ ret = ggtt_flush(dev_priv);
+ if (ret)
+ return ret;
- cond_resched();
- goto search_again;
- }
+ cond_resched();
- /*
- * If we still have pending pageflip completions, drop
- * back to userspace to give our workqueues time to
- * acquire our locks and unpin the old scanouts.
- */
- return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
+ flags |= PIN_NONBLOCK;
+ goto search_again;
found:
/* drm_mm doesn't allow any other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c83d2a195d15..8b85c91c3ea4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,6 +34,8 @@
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>
+#include "gt/intel_gt_pm.h"
+
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
@@ -236,7 +238,8 @@ struct i915_execbuffer {
unsigned int *flags;
struct intel_engine_cs *engine; /** engine to queue the request to */
- struct i915_gem_context *ctx; /** context for building the request */
+ struct intel_context *context; /* logical state for the request */
+ struct i915_gem_context *gem_context; /** caller's context */
struct i915_address_space *vm; /** GTT and vma for the request */
struct i915_request *request; /** our request to build */
@@ -738,7 +741,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
if (unlikely(!ctx))
return -ENOENT;
- eb->ctx = ctx;
+ eb->gem_context = ctx;
if (ctx->ppgtt) {
eb->vm = &ctx->ppgtt->vm;
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@@ -784,7 +787,6 @@ static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
static int eb_wait_for_ring(const struct i915_execbuffer *eb)
{
- const struct intel_context *ce;
struct i915_request *rq;
int ret = 0;
@@ -794,11 +796,7 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
* keeping all of their resources pinned.
*/
- ce = intel_context_lookup(eb->ctx, eb->engine);
- if (!ce || !ce->ring) /* first use, assume empty! */
- return 0;
-
- rq = __eb_wait_for_ring(ce->ring);
+ rq = __eb_wait_for_ring(eb->context->ring);
if (rq) {
mutex_unlock(&eb->i915->drm.struct_mutex);
@@ -817,15 +815,15 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
- struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
+ struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
struct drm_i915_gem_object *obj;
unsigned int i, batch;
int err;
- if (unlikely(i915_gem_context_is_closed(eb->ctx)))
+ if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
return -ENOENT;
- if (unlikely(i915_gem_context_is_banned(eb->ctx)))
+ if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
return -EIO;
INIT_LIST_HEAD(&eb->relocs);
@@ -870,8 +868,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
if (!vma->open_count++)
i915_vma_reopen(vma);
list_add(&lut->obj_link, &obj->lut_list);
- list_add(&lut->ctx_link, &eb->ctx->handles_list);
- lut->ctx = eb->ctx;
+ list_add(&lut->ctx_link, &eb->gem_context->handles_list);
+ lut->ctx = eb->gem_context;
lut->handle = handle;
add_vma:
@@ -1227,7 +1225,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = i915_request_alloc(eb->engine, eb->ctx);
+ rq = i915_request_create(eb->context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -2079,9 +2077,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
return file_priv->bsd_engine;
}
-#define I915_USER_RINGS (4)
-
-static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
+static const enum intel_engine_id user_ring_map[] = {
[I915_EXEC_DEFAULT] = RCS0,
[I915_EXEC_RENDER] = RCS0,
[I915_EXEC_BLT] = BCS0,
@@ -2089,31 +2085,57 @@ static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_VEBOX] = VECS0
};
-static struct intel_engine_cs *
-eb_select_engine(struct drm_i915_private *dev_priv,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args)
+static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
{
- unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
- struct intel_engine_cs *engine;
+ int err;
- if (user_ring_id > I915_USER_RINGS) {
- DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
- return NULL;
- }
+ /*
+ * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ * EIO if the GPU is already wedged.
+ */
+ err = i915_terminally_wedged(eb->i915);
+ if (err)
+ return err;
+
+ /*
+ * Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
+ */
+ err = intel_context_pin(ce);
+ if (err)
+ return err;
+
+ eb->engine = ce->engine;
+ eb->context = ce;
+ return 0;
+}
+
+static void eb_unpin_context(struct i915_execbuffer *eb)
+{
+ intel_context_unpin(eb->context);
+}
- if ((user_ring_id != I915_EXEC_BSD) &&
- ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+static unsigned int
+eb_select_legacy_ring(struct i915_execbuffer *eb,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args)
+{
+ struct drm_i915_private *i915 = eb->i915;
+ unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+
+ if (user_ring_id != I915_EXEC_BSD &&
+ (args->flags & I915_EXEC_BSD_MASK)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
- return NULL;
+ return -1;
}
- if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
+ if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
- bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
+ bsd_idx = gen8_dispatch_bsd_engine(i915, file);
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
bsd_idx <= I915_EXEC_BSD_RING2) {
bsd_idx >>= I915_EXEC_BSD_SHIFT;
@@ -2121,20 +2143,42 @@ eb_select_engine(struct drm_i915_private *dev_priv,
} else {
DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
bsd_idx);
- return NULL;
+ return -1;
}
- engine = dev_priv->engine[_VCS(bsd_idx)];
- } else {
- engine = dev_priv->engine[user_ring_map[user_ring_id]];
+ return _VCS(bsd_idx);
}
- if (!engine) {
- DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
- return NULL;
+ if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
+ DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+ return -1;
}
- return engine;
+ return user_ring_map[user_ring_id];
+}
+
+static int
+eb_select_engine(struct i915_execbuffer *eb,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args)
+{
+ struct intel_context *ce;
+ unsigned int idx;
+ int err;
+
+ if (i915_gem_context_user_engines(eb->gem_context))
+ idx = args->flags & I915_EXEC_RING_MASK;
+ else
+ idx = eb_select_legacy_ring(eb, file, args);
+
+ ce = i915_gem_context_get_engine(eb->gem_context, idx);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = eb_pin_context(eb, ce);
+ intel_context_put(ce);
+
+ return err;
}
static void
@@ -2275,8 +2319,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
{
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
+ struct dma_fence *exec_fence = NULL;
struct sync_file *out_fence = NULL;
- intel_wakeref_t wakeref;
int out_fence_fd = -1;
int err;
@@ -2318,11 +2362,24 @@ i915_gem_do_execbuffer(struct drm_device *dev,
return -EINVAL;
}
+ if (args->flags & I915_EXEC_FENCE_SUBMIT) {
+ if (in_fence) {
+ err = -EINVAL;
+ goto err_in_fence;
+ }
+
+ exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
+ if (!exec_fence) {
+ err = -EINVAL;
+ goto err_in_fence;
+ }
+ }
+
if (args->flags & I915_EXEC_FENCE_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
err = out_fence_fd;
- goto err_in_fence;
+ goto err_exec_fence;
}
}
@@ -2336,12 +2393,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
- eb.engine = eb_select_engine(eb.i915, file, args);
- if (!eb.engine) {
- err = -EINVAL;
- goto err_engine;
- }
-
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
@@ -2349,16 +2400,20 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* wakeref that we hold until the GPU has been idle for at least
* 100ms.
*/
- wakeref = intel_runtime_pm_get(eb.i915);
+ intel_gt_pm_get(eb.i915);
err = i915_mutex_lock_interruptible(dev);
if (err)
goto err_rpm;
- err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+ err = eb_select_engine(&eb, file, args);
if (unlikely(err))
goto err_unlock;
+ err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+ if (unlikely(err))
+ goto err_engine;
+
err = eb_relocate(&eb);
if (err) {
/*
@@ -2442,7 +2497,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
GEM_BUG_ON(eb.reloc_cache.rq);
/* Allocate a request for this batch buffer nice and early. */
- eb.request = i915_request_alloc(eb.engine, eb.ctx);
+ eb.request = i915_request_create(eb.context);
if (IS_ERR(eb.request)) {
err = PTR_ERR(eb.request);
goto err_batch_unpin;
@@ -2454,6 +2509,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_request;
}
+ if (exec_fence) {
+ err = i915_request_await_execution(eb.request, exec_fence,
+ eb.engine->bond_execute);
+ if (err < 0)
+ goto err_request;
+ }
+
if (fences) {
err = await_fence_array(&eb, fences);
if (err)
@@ -2480,8 +2542,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
- i915_request_add(eb.request);
add_to_client(eb.request, file);
+ i915_request_add(eb.request);
if (fences)
signal_fence_array(&eb, fences);
@@ -2503,17 +2565,20 @@ err_batch_unpin:
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
+err_engine:
+ eb_unpin_context(&eb);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_runtime_pm_put(eb.i915, wakeref);
-err_engine:
- i915_gem_context_put(eb.ctx);
+ intel_gt_pm_put(eb.i915);
+ i915_gem_context_put(eb.gem_context);
err_destroy:
eb_destroy(&eb);
err_out_fence:
if (out_fence_fd != -1)
put_unused_fd(out_fence_fd);
+err_exec_fence:
+ dma_fence_put(exec_fence);
err_in_fence:
dma_fence_put(in_fence);
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8f460cc4cc1f..266baa11df64 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -37,7 +37,6 @@
#include "i915_drv.h"
#include "i915_vgpu.h"
-#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@@ -1829,11 +1828,62 @@ static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
free_pt(&ppgtt->base.vm, pt);
}
+struct gen6_ppgtt_cleanup_work {
+ struct work_struct base;
+ struct i915_vma *vma;
+};
+
+static void gen6_ppgtt_cleanup_work(struct work_struct *wrk)
+{
+ struct gen6_ppgtt_cleanup_work *work =
+ container_of(wrk, typeof(*work), base);
+ /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */
+ struct drm_i915_private *i915 = work->vma->vm->i915;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_vma_destroy(work->vma);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ kfree(work);
+}
+
+static int nop_set_pages(struct i915_vma *vma)
+{
+ return -ENODEV;
+}
+
+static void nop_clear_pages(struct i915_vma *vma)
+{
+}
+
+static int nop_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ return -ENODEV;
+}
+
+static void nop_unbind(struct i915_vma *vma)
+{
+}
+
+static const struct i915_vma_ops nop_vma_ops = {
+ .set_pages = nop_set_pages,
+ .clear_pages = nop_clear_pages,
+ .bind_vma = nop_bind,
+ .unbind_vma = nop_unbind,
+};
+
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ struct gen6_ppgtt_cleanup_work *work = ppgtt->work;
- i915_vma_destroy(ppgtt->vma);
+ /* FIXME remove the struct_mutex to bring the locking under control */
+ INIT_WORK(&work->base, gen6_ppgtt_cleanup_work);
+ work->vma = ppgtt->vma;
+ work->vma->ops = &nop_vma_ops;
+ schedule_work(&work->base);
gen6_ppgtt_free_pd(ppgtt);
gen6_ppgtt_free_scratch(vm);
@@ -2012,9 +2062,13 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+ ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL);
+ if (!ppgtt->work)
+ goto err_free;
+
err = gen6_ppgtt_init_scratch(ppgtt);
if (err)
- goto err_free;
+ goto err_work;
ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
if (IS_ERR(ppgtt->vma)) {
@@ -2026,6 +2080,8 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
err_scratch:
gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+err_work:
+ kfree(ppgtt->work);
err_free:
kfree(ppgtt);
return ERR_PTR(err);
@@ -2752,6 +2808,12 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ if (USES_GUC(dev_priv)) {
+ ret = intel_guc_reserve_ggtt_top(&dev_priv->guc);
+ if (ret)
+ goto err_reserve;
+ }
+
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -2766,12 +2828,14 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
ret = i915_gem_init_aliasing_ppgtt(dev_priv);
if (ret)
- goto err;
+ goto err_appgtt;
}
return 0;
-err:
+err_appgtt:
+ intel_guc_release_ggtt_top(&dev_priv->guc);
+err_reserve:
drm_mm_remove_node(&ggtt->error_capture);
return ret;
}
@@ -2797,6 +2861,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
+ intel_guc_release_ggtt_top(&dev_priv->guc);
+
if (drm_mm_initialized(&ggtt->vm.mm)) {
intel_vgt_deballoon(dev_priv);
i915_address_space_fini(&ggtt->vm);
@@ -3280,7 +3346,9 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
size = gen6_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
- ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.clear_range = nop_clear_range;
+ if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
ggtt->vm.cleanup = gen6_gmch_remove;
@@ -3369,17 +3437,6 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
- * This is easier than doing range restriction on the fly, as we
- * currently don't have any bits spare to pass in this upper
- * restriction!
- */
- if (USES_GUC(dev_priv)) {
- ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
- ggtt->mappable_end =
- min_t(u64, ggtt->mappable_end, ggtt->vm.total);
- }
-
if ((ggtt->vm.total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
@@ -3608,6 +3665,89 @@ err_st_alloc:
return ERR_PTR(ret);
}
+static struct scatterlist *
+remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+ unsigned int width, unsigned int height,
+ unsigned int stride,
+ struct sg_table *st, struct scatterlist *sg)
+{
+ unsigned int row;
+
+ for (row = 0; row < height; row++) {
+ unsigned int left = width * I915_GTT_PAGE_SIZE;
+
+ while (left) {
+ dma_addr_t addr;
+ unsigned int length;
+
+ /* We don't need the pages, but need to initialize
+ * the entries so the sg list can be happily traversed.
+ * The only things we need are the DMA addresses.
+ */
+
+ addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
+
+ length = min(left, length);
+
+ st->nents++;
+
+ sg_set_page(sg, NULL, length, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = length;
+ sg = sg_next(sg);
+
+ offset += length / I915_GTT_PAGE_SIZE;
+ left -= length;
+ }
+
+ offset += stride - width;
+ }
+
+ return sg;
+}
+
+static noinline struct sg_table *
+intel_remap_pages(struct intel_remapped_info *rem_info,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned int size = intel_remapped_info_size(rem_info);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Allocate target SG list. */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ st->nents = 0;
+ sg = st->sgl;
+
+ for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
+ sg = remap_pages(obj, rem_info->plane[i].offset,
+ rem_info->plane[i].width, rem_info->plane[i].height,
+ rem_info->plane[i].stride, st, sg);
+ }
+
+ i915_sg_trim(st);
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+
+ DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
+
+ return ERR_PTR(ret);
+}
+
static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
@@ -3686,6 +3826,11 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
break;
+ case I915_GGTT_VIEW_REMAPPED:
+ vma->pages =
+ intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+ break;
+
case I915_GGTT_VIEW_PARTIAL:
vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
break;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index f597f35b109b..38496039456b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -38,8 +38,8 @@
#include <linux/mm.h>
#include <linux/pagevec.h>
+#include "gt/intel_reset.h"
#include "i915_request.h"
-#include "i915_reset.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
@@ -163,11 +163,18 @@ typedef u64 gen8_ppgtt_pml4e_t;
struct sg_table;
+struct intel_remapped_plane_info {
+ /* in gtt pages */
+ unsigned int width, height, stride, offset;
+} __packed;
+
+struct intel_remapped_info {
+ struct intel_remapped_plane_info plane[2];
+ unsigned int unused_mbz;
+} __packed;
+
struct intel_rotation_info {
- struct intel_rotation_plane_info {
- /* tiles */
- unsigned int width, height, stride, offset;
- } plane[2];
+ struct intel_remapped_plane_info plane[2];
} __packed;
struct intel_partial_info {
@@ -179,12 +186,20 @@ enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+ I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};
static inline void assert_i915_gem_gtt_types(void)
{
BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));
+
+ /* Check that rotation/remapped share offsets for simplicity */
+ BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
+ offsetof(struct intel_rotation_info, plane[0]));
+ BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
+ offsetofend(struct intel_rotation_info, plane[1]));
/* As we encode the size of each branch inside the union into its type,
* we have to be careful that each branch has a unique size.
@@ -193,6 +208,7 @@ static inline void assert_i915_gem_gtt_types(void)
case I915_GGTT_VIEW_NORMAL:
case I915_GGTT_VIEW_PARTIAL:
case I915_GGTT_VIEW_ROTATED:
+ case I915_GGTT_VIEW_REMAPPED:
/* gcc complains if these are identical cases */
break;
}
@@ -204,6 +220,7 @@ struct i915_ggtt_view {
/* Members need to contain no holes/padding */
struct intel_partial_info partial;
struct intel_rotation_info rotated;
+ struct intel_remapped_info remapped;
};
};
@@ -384,6 +401,7 @@ struct i915_ggtt {
u32 pin_bias;
struct drm_mm_node error_capture;
+ struct drm_mm_node uc_fw;
};
struct i915_hw_ppgtt {
@@ -396,8 +414,6 @@ struct i915_hw_ppgtt {
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
-
- u32 user_handle;
};
struct gen6_hw_ppgtt {
@@ -408,6 +424,8 @@ struct gen6_hw_ppgtt {
unsigned int pin_count;
bool scan_for_unused_pt;
+
+ struct gen6_ppgtt_cleanup_work *work;
};
#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index ab627ed1269c..21662176819f 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -28,9 +28,6 @@
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)
-/* convert swiotlb segment size into sensible units (pages)! */
-#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
-
static void internal_free_pages(struct sg_table *st)
{
struct scatterlist *sg;
diff --git a/drivers/gpu/drm/i915/i915_gem_pm.c b/drivers/gpu/drm/i915/i915_gem_pm.c
new file mode 100644
index 000000000000..fa9c2ebd966a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_pm.c
@@ -0,0 +1,250 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gt/intel_gt_pm.h"
+
+#include "i915_drv.h"
+#include "i915_gem_pm.h"
+#include "i915_globals.h"
+
+static void i915_gem_park(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ for_each_engine(engine, i915, id)
+ i915_gem_batch_pool_fini(&engine->batch_pool);
+
+ i915_timelines_park(i915);
+ i915_vma_parked(i915);
+
+ i915_globals_park();
+}
+
+static void idle_work_handler(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gem.idle_work);
+ bool restart = true;
+
+ cancel_delayed_work(&i915->gem.retire_work);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ intel_wakeref_lock(&i915->gt.wakeref);
+ if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work)) {
+ i915_gem_park(i915);
+ restart = false;
+ }
+ intel_wakeref_unlock(&i915->gt.wakeref);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (restart)
+ queue_delayed_work(i915->wq,
+ &i915->gem.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
+static void retire_work_handler(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gem.retire_work.work);
+
+ /* Come back later if the device is busy... */
+ if (mutex_trylock(&i915->drm.struct_mutex)) {
+ i915_retire_requests(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
+ queue_delayed_work(i915->wq,
+ &i915->gem.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
+static int pm_notifier(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct drm_i915_private *i915 =
+ container_of(nb, typeof(*i915), gem.pm_notifier);
+
+ switch (action) {
+ case INTEL_GT_UNPARK:
+ i915_globals_unpark();
+ queue_delayed_work(i915->wq,
+ &i915->gem.retire_work,
+ round_jiffies_up_relative(HZ));
+ break;
+
+ case INTEL_GT_PARK:
+ queue_work(i915->wq, &i915->gem.idle_work);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
+{
+ bool result = true;
+
+ do {
+ if (i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST,
+ I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ /* XXX hide warning from gem_eio */
+ if (i915_modparams.reset) {
+ dev_err(i915->drm.dev,
+ "Failed to idle engines, declaring wedged!\n");
+ GEM_TRACE_DUMP();
+ }
+
+ /*
+ * Forcibly cancel outstanding work and leave
+ * the gpu quiet.
+ */
+ i915_gem_set_wedged(i915);
+ result = false;
+ }
+ } while (i915_retire_requests(i915) && result);
+
+ GEM_BUG_ON(i915->gt.awake);
+ return result;
+}
+
+bool i915_gem_load_power_context(struct drm_i915_private *i915)
+{
+ return switch_to_kernel_context_sync(i915);
+}
+
+void i915_gem_suspend(struct drm_i915_private *i915)
+{
+ GEM_TRACE("\n");
+
+ flush_workqueue(i915->wq);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ /*
+ * We have to flush all the executing contexts to main memory so
+ * that they can be saved in the hibernation image. To ensure the last
+ * context image is coherent, we have to switch away from it. That
+ * leaves the i915->kernel_context still active when
+ * we actually suspend, and its image in memory may not match the GPU
+ * state. Fortunately, the kernel_context is disposable and we do
+ * not rely on its state.
+ */
+ switch_to_kernel_context_sync(i915);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ /*
+ * Assert that we successfully flushed all the work and
+ * reset the GPU back to its idle, low power state.
+ */
+ GEM_BUG_ON(i915->gt.awake);
+ flush_work(&i915->gem.idle_work);
+
+ cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+
+ i915_gem_drain_freed_objects(i915);
+
+ intel_uc_suspend(i915);
+}
+
+void i915_gem_suspend_late(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct list_head *phases[] = {
+ &i915->mm.unbound_list,
+ &i915->mm.bound_list,
+ NULL
+ }, **phase;
+
+ /*
+ * Neither the BIOS, ourselves, nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode. And the only
+ * known way to disable logical contexts is through a GPU reset.
+ *
+ * So in order to leave the system in a known default configuration,
+ * always reset the GPU upon unload and suspend. Afterwards we then
+ * clean up the GEM state tracking, flushing off the requests and
+ * leaving the system in a known idle state.
+ *
+ * Note that it is of the utmost importance that the GPU is idle and
+ * all stray writes are flushed *before* we dismantle the backing
+ * storage for the pinned objects.
+ *
+ * However, since we are uncertain that resetting the GPU on older
+ * machines is a good idea, we don't - just in case it leaves the
+ * machine in an unusable condition.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ list_for_each_entry(obj, *phase, mm.link)
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_uc_sanitize(i915);
+ i915_gem_sanitize(i915);
+}
+
+void i915_gem_resume(struct drm_i915_private *i915)
+{
+ GEM_TRACE("\n");
+
+ WARN_ON(i915->gt.awake);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+
+ i915_gem_restore_gtt_mappings(i915);
+ i915_gem_restore_fences(i915);
+
+ /*
+ * As we didn't flush the kernel context before suspend, we cannot
+ * guarantee that the context image is complete. So let's just reset
+ * it and start again.
+ */
+ intel_gt_resume(i915);
+
+ if (i915_gem_init_hw(i915))
+ goto err_wedged;
+
+ intel_uc_resume(i915);
+
+ /* Always reload a context for powersaving. */
+ if (!i915_gem_load_power_context(i915))
+ goto err_wedged;
+
+out_unlock:
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return;
+
+err_wedged:
+ if (!i915_reset_failed(i915)) {
+ dev_err(i915->drm.dev,
+ "Failed to re-initialize GPU, declaring it wedged!\n");
+ i915_gem_set_wedged(i915);
+ }
+ goto out_unlock;
+}
+
+void i915_gem_init__pm(struct drm_i915_private *i915)
+{
+ INIT_WORK(&i915->gem.idle_work, idle_work_handler);
+ INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
+
+ i915->gem.pm_notifier.notifier_call = pm_notifier;
+ blocking_notifier_chain_register(&i915->gt.pm_notifications,
+ &i915->gem.pm_notifier);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_pm.h b/drivers/gpu/drm/i915/i915_gem_pm.h
new file mode 100644
index 000000000000..6f7d5d11ac3b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_pm.h
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_PM_H__
+#define __I915_GEM_PM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct work_struct;
+
+void i915_gem_init__pm(struct drm_i915_private *i915);
+
+bool i915_gem_load_power_context(struct drm_i915_private *i915);
+void i915_gem_resume(struct drm_i915_private *i915);
+
+void i915_gem_idle_work_handler(struct work_struct *work);
+
+void i915_gem_suspend(struct drm_i915_private *i915);
+void i915_gem_suspend_late(struct drm_i915_private *i915);
+
+#endif /* __I915_GEM_PM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 6da795c7e62e..588e3898b120 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -114,6 +114,67 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
return !i915_gem_object_has_pages(obj);
}
+static void __start_writeback(struct drm_i915_gem_object *obj,
+ unsigned int flags)
+{
+ struct address_space *mapping;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = SWAP_CLUSTER_MAX,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ .for_reclaim = 1,
+ };
+ unsigned long i;
+
+ lockdep_assert_held(&obj->mm.lock);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
+
+ switch (obj->mm.madv) {
+ case I915_MADV_DONTNEED:
+ __i915_gem_object_truncate(obj);
+ /* fall through */
+ case __I915_MADV_PURGED:
+ return;
+ }
+
+ if (!obj->base.filp)
+ return;
+
+ if (!(flags & I915_SHRINK_WRITEBACK))
+ return;
+
+ /*
+ * Leave mmappings intact (GTT will have been revoked on unbinding,
+ * leaving only CPU mmappings around) and add those pages to the LRU
+ * instead of invoking writeback so they are aged and paged out
+ * as normal.
+ */
+ mapping = obj->base.filp->f_mapping;
+
+ /* Begin writeback on each dirty page */
+ for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+ struct page *page;
+
+ page = find_lock_entry(mapping, i);
+ if (!page || xa_is_value(page))
+ continue;
+
+ if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
+ int ret;
+
+ SetPageReclaim(page);
+ ret = mapping->a_ops->writepage(page, &wbc);
+ if (!PageWriteback(page))
+ ClearPageReclaim(page);
+ if (!ret)
+ goto put;
+ }
+ unlock_page(page);
+put:
+ put_page(page);
+ }
+}
+
/**
* i915_gem_shrink - Shrink buffer object caches
* @i915: i915 device
@@ -254,7 +315,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
if (!i915_gem_object_has_pages(obj)) {
- __i915_gem_object_invalidate(obj);
+ __start_writeback(obj, flags);
count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
@@ -366,13 +427,15 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
&sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE);
+ I915_SHRINK_PURGEABLE |
+ I915_SHRINK_WRITEBACK);
if (sc->nr_scanned < sc->nr_to_scan)
freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_WRITEBACK);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_wakeref_t wakeref;
@@ -382,7 +445,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
&sc->nr_scanned,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_WRITEBACK);
}
}
@@ -404,7 +468,8 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
with_intel_runtime_pm(i915, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_WRITEBACK);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f51ff683dd2e..4f85cbdddb0d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -36,8 +36,11 @@
#include <drm/drm_print.h>
-#include "i915_gpu_error.h"
#include "i915_drv.h"
+#include "i915_gpu_error.h"
+#include "intel_atomic.h"
+#include "intel_csr.h"
+#include "intel_overlay.h"
static inline const struct intel_engine_cs *
engine_lookup(const struct drm_i915_private *i915, unsigned int id)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 5dc761e85d9d..2ecd0c6a1c94 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -13,8 +13,9 @@
#include <drm/drm_mm.h>
+#include "gt/intel_engine.h"
+
#include "intel_device_info.h"
-#include "intel_ringbuffer.h"
#include "intel_uc_fw.h"
#include "i915_gem.h"
@@ -178,8 +179,6 @@ struct i915_gpu_state {
struct scatterlist *sgl, *fit;
};
-struct i915_gpu_restart;
-
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -240,8 +239,6 @@ struct i915_gpu_error {
wait_queue_head_t reset_queue;
struct srcu_struct reset_backoff_srcu;
-
- struct i915_gpu_restart *restart;
};
struct drm_i915_error_state_buf {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b92cfd69134b..233211fde0ea 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -38,8 +38,12 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_fifo_underrun.h"
+#include "intel_hotplug.h"
+#include "intel_lpe_audio.h"
#include "intel_psr.h"
/**
@@ -1301,7 +1305,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
goto out;
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&rps->lock);
pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
@@ -1367,7 +1371,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
rps->last_adj = 0;
}
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&rps->lock);
out:
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
new file mode 100644
index 000000000000..0ccd0d90919d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_IRQ_H__
+#define __I915_IRQ_H__
+
+#include <linux/types.h>
+
+#include "i915_drv.h"
+
+struct drm_i915_private;
+struct intel_crtc;
+
+extern void intel_irq_init(struct drm_i915_private *dev_priv);
+extern void intel_irq_fini(struct drm_i915_private *dev_priv);
+int intel_irq_install(struct drm_i915_private *dev_priv);
+void intel_irq_uninstall(struct drm_i915_private *dev_priv);
+
+u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask);
+
+void
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+ u32 status_mask);
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+
+void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+ u32 mask,
+ u32 bits);
+void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
+static inline void
+ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
+{
+ ilk_update_display_irq(dev_priv, bits, bits);
+}
+static inline void
+ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
+{
+ ilk_update_display_irq(dev_priv, bits, 0);
+}
+void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
+static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 bits)
+{
+ bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
+}
+static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 bits)
+{
+ bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
+}
+void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
+static inline void
+ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
+{
+ ibx_display_interrupt_update(dev_priv, bits, bits);
+}
+static inline void
+ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
+{
+ ibx_display_interrupt_update(dev_priv, bits, 0);
+}
+
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
+
+static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
+ u32 mask)
+{
+ return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
+}
+
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
+static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+ /*
+ * We only use drm_irq_uninstall() at unload and VT switch, so
+ * this is the only thing we need to check.
+ */
+ return dev_priv->runtime_pm.irqs_enabled;
+}
+
+int intel_get_crtc_scanline(struct intel_crtc *crtc);
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+ u8 pipe_mask);
+void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ u8 pipe_mask);
+void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
+
+#endif /* __I915_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f893c2cbce15..d7c07a947497 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -370,6 +370,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
+ .has_rps = true, \
.ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
I9XX_PIPE_OFFSETS, \
@@ -417,6 +418,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
+ .has_rps = true, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
@@ -470,6 +472,7 @@ static const struct intel_device_info intel_valleyview_info = {
.num_pipes = 2,
.has_runtime_pm = 1,
.has_rc6 = 1,
+ .has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
.ppgtt_type = INTEL_PPGTT_FULL,
@@ -565,6 +568,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
+ .has_rps = true,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
.ppgtt_type = INTEL_PPGTT_FULL,
@@ -596,8 +600,6 @@ static const struct intel_device_info intel_cherryview_info = {
#define SKL_PLATFORM \
GEN9_FEATURES, \
- /* Display WA #0477 WaDisableIPC: skl */ \
- .display.has_ipc = 0, \
PLATFORM(INTEL_SKYLAKE)
static const struct intel_device_info intel_skylake_gt1_info = {
@@ -640,6 +642,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_runtime_pm = 1, \
.display.has_csr = 1, \
.has_rc6 = 1, \
+ .has_rps = true, \
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index dc4ce694c06a..bebea5ba5c26 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -195,6 +195,8 @@
#include <linux/sizes.h>
#include <linux/uuid.h>
+#include "gt/intel_lrc_reg.h"
+
#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
@@ -210,7 +212,6 @@
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"
#include "i915_oa_icl.h"
-#include "intel_lrc_reg.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -1202,28 +1203,35 @@ static int i915_oa_read(struct i915_perf_stream *stream,
static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
{
- struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct i915_gem_engines_iter it;
struct intel_context *ce;
- int ret;
+ int err;
- ret = i915_mutex_lock_interruptible(&i915->drm);
- if (ret)
- return ERR_PTR(ret);
+ err = i915_mutex_lock_interruptible(&i915->drm);
+ if (err)
+ return ERR_PTR(err);
- /*
- * As the ID is the gtt offset of the context's vma we
- * pin the vma to ensure the ID remains fixed.
- *
- * NB: implied RCS engine...
- */
- ce = intel_context_pin(ctx, engine);
- mutex_unlock(&i915->drm.struct_mutex);
- if (IS_ERR(ce))
- return ce;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (ce->engine->class != RENDER_CLASS)
+ continue;
+
+ /*
+ * As the ID is the gtt offset of the context's vma we
+ * pin the vma to ensure the ID remains fixed.
+ */
+ err = intel_context_pin(ce);
+ if (err == 0) {
+ i915->perf.oa.pinned_ctx = ce;
+ break;
+ }
+ }
+ i915_gem_context_unlock_engines(ctx);
- i915->perf.oa.pinned_ctx = ce;
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ return ERR_PTR(err);
- return ce;
+ return i915->perf.oa.pinned_ctx;
}
/**
@@ -1679,7 +1687,7 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
CTX_REG(reg_state,
CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- gen8_make_rpcs(i915, &ce->sseu));
+ intel_sseu_make_rpcs(i915, &ce->sseu));
}
/*
@@ -1709,7 +1717,6 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
- struct intel_engine_cs *engine = dev_priv->engine[RCS0];
unsigned int map_type = i915_coherent_map_type(dev_priv);
struct i915_gem_context *ctx;
struct i915_request *rq;
@@ -1738,30 +1745,43 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
/* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct intel_context *ce = intel_context_lookup(ctx, engine);
- u32 *regs;
-
- /* OA settings will be set upon first use */
- if (!ce || !ce->state)
- continue;
-
- regs = i915_gem_object_pin_map(ce->state->obj, map_type);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx),
+ it) {
+ u32 *regs;
+
+ if (ce->engine->class != RENDER_CLASS)
+ continue;
+
+ /* OA settings will be set upon first use */
+ if (!ce->state)
+ continue;
+
+ regs = i915_gem_object_pin_map(ce->state->obj,
+ map_type);
+ if (IS_ERR(regs)) {
+ i915_gem_context_unlock_engines(ctx);
+ return PTR_ERR(regs);
+ }
- ce->state->obj->mm.dirty = true;
- regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
+ ce->state->obj->mm.dirty = true;
+ regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
- gen8_update_reg_state_unlocked(ce, regs, oa_config);
+ gen8_update_reg_state_unlocked(ce, regs, oa_config);
- i915_gem_object_unpin_map(ce->state->obj);
+ i915_gem_object_unpin_map(ce->state->obj);
+ }
+ i915_gem_context_unlock_engines(ctx);
}
/*
* Apply the configuration by doing one context restore of the edited
* context image.
*/
- rq = i915_request_alloc(engine, dev_priv->kernel_context);
+ rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 46a52da3db29..1ccda0ee4ff5 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -6,9 +6,12 @@
#include <linux/irq.h>
#include <linux/pm_runtime.h>
-#include "i915_pmu.h"
-#include "intel_ringbuffer.h"
+
+#include "gt/intel_engine.h"
+
#include "i915_drv.h"
+#include "i915_pmu.h"
+#include "intel_pm.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 782183b78f49..414d0a6d1f70 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -96,9 +96,58 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
return total_length;
}
+static int
+query_engine_info(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ struct drm_i915_query_engine_info __user *query_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct drm_i915_engine_info __user *info_ptr;
+ struct drm_i915_query_engine_info query;
+ struct drm_i915_engine_info info = { };
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int len, ret;
+
+ if (query_item->flags)
+ return -EINVAL;
+
+ len = sizeof(struct drm_i915_query_engine_info) +
+ RUNTIME_INFO(i915)->num_engines *
+ sizeof(struct drm_i915_engine_info);
+
+ ret = copy_query_item(&query, sizeof(query), len, query_item);
+ if (ret != 0)
+ return ret;
+
+ if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
+ query.rsvd[2])
+ return -EINVAL;
+
+ info_ptr = &query_ptr->engines[0];
+
+ for_each_engine(engine, i915, id) {
+ info.engine.engine_class = engine->uabi_class;
+ info.engine.engine_instance = engine->instance;
+ info.capabilities = engine->uabi_capabilities;
+
+ if (__copy_to_user(info_ptr, &info, sizeof(info)))
+ return -EFAULT;
+
+ query.num_engines++;
+ info_ptr++;
+ }
+
+ if (__copy_to_user(query_ptr, &query, sizeof(query)))
+ return -EFAULT;
+
+ return len;
+}
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
+ query_engine_info,
};
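For context: userspace consumes this query by calling DRM_IOCTL_I915_QUERY twice, once with a zero-length item to learn the required buffer size, then again with a buffer to fill. A minimal sketch, assuming the uapi structures added by this series and an already-open DRM fd (error handling elided):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static struct drm_i915_query_engine_info *query_engines(int fd)
    {
            struct drm_i915_query_item item = {
                    .query_id = DRM_I915_QUERY_ENGINE_INFO,
            };
            struct drm_i915_query q = {
                    .num_items = 1,
                    .items_ptr = (uintptr_t)&item,
            };
            struct drm_i915_query_engine_info *info;

            /* First pass: item.length comes back as the required size. */
            if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
                    return NULL;

            info = calloc(1, item.length);
            item.data_ptr = (uintptr_t)info;

            /* Second pass: the kernel fills num_engines and engines[]. */
            if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q)) {
                    free(info);
                    return NULL;
            }
            return info;
    }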
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 13d6bd4e17b2..72472fabae49 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1814,7 +1814,6 @@ enum i915_power_well_id {
#define PWR_DOWN_LN_3 (0x8 << 4)
#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
#define PWR_DOWN_LN_1_0 (0x3 << 4)
-#define PWR_DOWN_LN_1 (0x2 << 4)
#define PWR_DOWN_LN_3_1 (0xa << 4)
#define PWR_DOWN_LN_3_1_0 (0xb << 4)
#define PWR_DOWN_LN_MASK (0xf << 4)
@@ -2871,6 +2870,7 @@ enum i915_power_well_id {
#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
#define GFX_FLSH_CNTL_EN (1 << 0)
#define ECOSKPD _MMIO(0x21d0)
+#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4)
#define ECO_GATING_CX_ONLY (1 << 3)
#define ECO_FLIP_DONE (1 << 0)
@@ -5770,6 +5770,7 @@ enum {
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_YUV420_ENABLE (1 << 27)
#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
#define PIPEMISC_DITHER_8_BPC (0 << 5)
@@ -8866,6 +8867,7 @@ enum {
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
#define GEN10_SAMPLER_MODE _MMIO(0xE18C)
+#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
@@ -9013,32 +9015,32 @@ enum {
/* HSW Audio */
#define _HSW_AUD_CONFIG_A 0x65000
#define _HSW_AUD_CONFIG_B 0x65100
-#define HSW_AUD_CFG(pipe) _MMIO_PIPE(pipe, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
+#define HSW_AUD_CFG(trans) _MMIO_TRANS(trans, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
#define _HSW_AUD_MISC_CTRL_A 0x65010
#define _HSW_AUD_MISC_CTRL_B 0x65110
-#define HSW_AUD_MISC_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
+#define HSW_AUD_MISC_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
-#define HSW_AUD_M_CTS_ENABLE(pipe) _MMIO_PIPE(pipe, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
+#define HSW_AUD_M_CTS_ENABLE(trans) _MMIO_TRANS(trans, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
#define AUD_CONFIG_M_MASK 0xfffff
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
-#define HSW_AUD_DIP_ELD_CTRL(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
+#define HSW_AUD_DIP_ELD_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
/* Audio Digital Converter */
#define _HSW_AUD_DIG_CNVT_1 0x65080
#define _HSW_AUD_DIG_CNVT_2 0x65180
-#define AUD_DIG_CNVT(pipe) _MMIO_PIPE(pipe, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
+#define AUD_DIG_CNVT(trans) _MMIO_TRANS(trans, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
#define DIP_PORT_SEL_MASK 0x3
#define _HSW_AUD_EDID_DATA_A 0x65050
#define _HSW_AUD_EDID_DATA_B 0x65150
-#define HSW_AUD_EDID_DATA(pipe) _MMIO_PIPE(pipe, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
+#define HSW_AUD_EDID_DATA(trans) _MMIO_TRANS(trans, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c)
#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0)
@@ -9527,6 +9529,7 @@ enum skl_power_gate {
#define TRANS_MSA_12_BPC (3 << 5)
#define TRANS_MSA_16_BPC (4 << 5)
#define TRANS_MSA_CEA_RANGE (1 << 3)
+#define TRANS_MSA_USE_VSC_SDP (1 << 14)
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c88e538b2ef4..18b34b0bf872 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -32,13 +32,14 @@
#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
-#include "i915_reset.h"
#include "intel_pm.h"
struct execute_cb {
struct list_head link;
struct irq_work work;
struct i915_sw_fence *fence;
+ void (*hook)(struct i915_request *rq, struct dma_fence *signal);
+ struct i915_request *signal;
};
static struct i915_global_request {
@@ -132,19 +133,6 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static void reserve_gt(struct drm_i915_private *i915)
-{
- if (!i915->gt.active_requests++)
- i915_gem_unpark(i915);
-}
-
-static void unreserve_gt(struct drm_i915_private *i915)
-{
- GEM_BUG_ON(!i915->gt.active_requests);
- if (!--i915->gt.active_requests)
- i915_gem_park(i915);
-}
-
static void advance_ring(struct i915_request *request)
{
struct intel_ring *ring = request->ring;
@@ -302,11 +290,10 @@ static void i915_request_retire(struct i915_request *request)
i915_request_remove_from_client(request);
- intel_context_unpin(request->hw_context);
-
__retire_engine_upto(request->engine, request);
- unreserve_gt(request->i915);
+ intel_context_exit(request->hw_context);
+ intel_context_unpin(request->hw_context);
i915_sched_node_fini(&request->sched);
i915_request_put(request);
@@ -344,6 +331,17 @@ static void irq_execute_cb(struct irq_work *wrk)
kmem_cache_free(global.slab_execute_cbs, cb);
}
+static void irq_execute_cb_hook(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ cb->hook(container_of(cb->fence, struct i915_request, submit),
+ &cb->signal->fence);
+ i915_request_put(cb->signal);
+
+ irq_execute_cb(wrk);
+}
+
static void __notify_execute_cb(struct i915_request *rq)
{
struct execute_cb *cb;
@@ -370,14 +368,19 @@ static void __notify_execute_cb(struct i915_request *rq)
}
static int
-i915_request_await_execution(struct i915_request *rq,
- struct i915_request *signal,
- gfp_t gfp)
+__i915_request_await_execution(struct i915_request *rq,
+ struct i915_request *signal,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal),
+ gfp_t gfp)
{
struct execute_cb *cb;
- if (i915_request_is_active(signal))
+ if (i915_request_is_active(signal)) {
+ if (hook)
+ hook(rq, &signal->fence);
return 0;
+ }
cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
if (!cb)
@@ -387,8 +390,18 @@ i915_request_await_execution(struct i915_request *rq,
i915_sw_fence_await(cb->fence);
init_irq_work(&cb->work, irq_execute_cb);
+ if (hook) {
+ cb->hook = hook;
+ cb->signal = i915_request_get(signal);
+ cb->work.func = irq_execute_cb_hook;
+ }
+
spin_lock_irq(&signal->lock);
if (i915_request_is_active(signal)) {
+ if (hook) {
+ hook(rq, &signal->fence);
+ i915_request_put(signal);
+ }
i915_sw_fence_complete(cb->fence);
kmem_cache_free(global.slab_execute_cbs, cb);
} else {
@@ -466,6 +479,8 @@ void __i915_request_submit(struct i915_request *request)
/* Transfer from per-context onto the global per-engine timeline */
move_to_timeline(request, &engine->timeline);
+ engine->serial++;
+
trace_i915_request_execute(request);
}
@@ -513,6 +528,12 @@ void __i915_request_unsubmit(struct i915_request *request)
/* Transfer back from the global per-engine timeline to per-context */
move_to_timeline(request, request->timeline);
+ /* We've already spun, don't charge on resubmitting. */
+ if (request->sched.semaphores && i915_request_started(request)) {
+ request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+ request->sched.semaphores = 0;
+ }
+
/*
* We don't need to wake_up any waiters on request->execute, they
* will get woken by any other event or us re-adding this request
@@ -597,7 +618,7 @@ static void ring_retire_requests(struct intel_ring *ring)
}
static noinline struct i915_request *
-i915_request_alloc_slow(struct intel_context *ce)
+request_alloc_slow(struct intel_context *ce, gfp_t gfp)
{
struct intel_ring *ring = ce->ring;
struct i915_request *rq;
@@ -605,6 +626,9 @@ i915_request_alloc_slow(struct intel_context *ce)
if (list_empty(&ring->request_list))
goto out;
+ if (!gfpflags_allow_blocking(gfp))
+ goto out;
+
/* Ratelimit ourselves to prevent oom from malicious clients */
rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
cond_synchronize_rcu(rq->rcustate);
@@ -613,62 +637,21 @@ i915_request_alloc_slow(struct intel_context *ce)
ring_retire_requests(ring);
out:
- return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
+ return kmem_cache_alloc(global.slab_requests, gfp);
}
-/**
- * i915_request_alloc - allocate a request structure
- *
- * @engine: engine that we wish to issue the request on.
- * @ctx: context that the request will be associated with.
- *
- * Returns a pointer to the allocated request if successful,
- * or an error code if not.
- */
struct i915_request *
-i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
+__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
- struct drm_i915_private *i915 = engine->i915;
- struct intel_context *ce;
- struct i915_timeline *tl;
+ struct i915_timeline *tl = ce->ring->timeline;
struct i915_request *rq;
u32 seqno;
int ret;
- lockdep_assert_held(&i915->drm.struct_mutex);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
- /*
- * Preempt contexts are reserved for exclusive use to inject a
- * preemption context switch. They are never to be used for any trivial
- * request!
- */
- GEM_BUG_ON(ctx == i915->preempt_context);
-
- /*
- * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
- * EIO if the GPU is already wedged.
- */
- ret = i915_terminally_wedged(i915);
- if (ret)
- return ERR_PTR(ret);
-
- /*
- * Pinning the contexts may generate requests in order to acquire
- * GGTT space, so do this first before we reserve a seqno for
- * ourselves.
- */
- ce = intel_context_pin(ctx, engine);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- reserve_gt(i915);
- mutex_lock(&ce->ring->timeline->mutex);
-
- /* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
- i915_request_completed(rq))
- i915_request_retire(rq);
+ /* Check that the caller provided an already pinned context */
+ __intel_context_pin(ce);
/*
* Beware: Dragons be flying overhead.
@@ -700,30 +683,26 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* Do not use kmem_cache_zalloc() here!
*/
rq = kmem_cache_alloc(global.slab_requests,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- rq = i915_request_alloc_slow(ce);
+ rq = request_alloc_slow(ce, gfp);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
}
}
- INIT_LIST_HEAD(&rq->active_list);
- INIT_LIST_HEAD(&rq->execute_cb);
-
- tl = ce->ring->timeline;
ret = i915_timeline_get_seqno(tl, rq, &seqno);
if (ret)
goto err_free;
- rq->i915 = i915;
- rq->engine = engine;
- rq->gem_context = ctx;
+ rq->i915 = ce->engine->i915;
rq->hw_context = ce;
+ rq->gem_context = ce->gem_context;
+ rq->engine = ce->engine;
rq->ring = ce->ring;
rq->timeline = tl;
- GEM_BUG_ON(rq->timeline == &engine->timeline);
+ GEM_BUG_ON(rq->timeline == &ce->engine->timeline);
rq->hwsp_seqno = tl->hwsp_seqno;
rq->hwsp_cacheline = tl->hwsp_cacheline;
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -743,6 +722,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq->batch = NULL;
rq->capture_list = NULL;
rq->waitboost = false;
+ rq->execution_mask = ALL_ENGINES;
+
+ INIT_LIST_HEAD(&rq->active_list);
+ INIT_LIST_HEAD(&rq->execute_cb);
/*
* Reserve space in the ring buffer for all the commands required to
@@ -756,7 +739,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* around inside i915_request_add() there is sufficient space at
* the beginning of the ring as well.
*/
- rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32);
+ rq->reserved_space =
+ 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
/*
* Record the position of the start of the request so that
@@ -766,20 +750,16 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
- ret = engine->request_alloc(rq);
+ ret = rq->engine->request_alloc(rq);
if (ret)
goto err_unwind;
- /* Keep a second pin for the dual retirement along engine and ring */
- __intel_context_pin(ce);
-
rq->infix = rq->ring->emit; /* end of header; start of user payload */
- /* Check that we didn't interrupt ourselves with a new request */
- lockdep_assert_held(&rq->timeline->mutex);
- GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
- rq->cookie = lockdep_pin_lock(&rq->timeline->mutex);
+ /* Keep a second pin for the dual retirement along engine and ring */
+ __intel_context_pin(ce);
+ intel_context_mark_active(ce);
return rq;
err_unwind:
@@ -793,12 +773,39 @@ err_unwind:
err_free:
kmem_cache_free(global.slab_requests, rq);
err_unreserve:
- mutex_unlock(&ce->ring->timeline->mutex);
- unreserve_gt(i915);
intel_context_unpin(ce);
return ERR_PTR(ret);
}
+struct i915_request *
+i915_request_create(struct intel_context *ce)
+{
+ struct i915_request *rq;
+
+ intel_context_timeline_lock(ce);
+
+ /* Move our oldest request to the slab-cache (if not in use!) */
+ rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
+ if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
+ i915_request_completed(rq))
+ i915_request_retire(rq);
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_KERNEL);
+ intel_context_exit(ce); /* active reference transferred to request */
+ if (IS_ERR(rq))
+ goto err_unlock;
+
+ /* Check that we do not interrupt ourselves with a new request */
+ rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+
+ return rq;
+
+err_unlock:
+ intel_context_timeline_unlock(ce);
+ return rq;
+}
+
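For in-kernel users that already hold a pinned intel_context (e.g. engine->kernel_context, as in the perf path above), the expected calling pattern is a sketch like the following; the command emission step is hypothetical:

    rq = i915_request_create(ce);        /* takes the timeline lock */
    if (IS_ERR(rq))
            return PTR_ERR(rq);

    /* ... emit commands, e.g. via intel_ring_begin(rq, n) ... */

    i915_request_add(rq);                /* commits and drops the lock */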
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
@@ -854,13 +861,13 @@ emit_semaphore_wait(struct i915_request *to,
if (err < 0)
return err;
- /* We need to pin the signaler's HWSP until we are finished reading. */
- err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+ /* Only submit our spinner after the signaler is running! */
+ err = __i915_request_await_execution(to, from, NULL, gfp);
if (err)
return err;
- /* Only submit our spinner after the signaler is running! */
- err = i915_request_await_execution(to, from, gfp);
+ /* We need to pin the signaler's HWSP until we are finished reading. */
+ err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
if (err)
return err;
@@ -991,6 +998,52 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
return 0;
}
+int
+i915_request_await_execution(struct i915_request *rq,
+ struct dma_fence *fence,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal))
+{
+ struct dma_fence **child = &fence;
+ unsigned int nchild = 1;
+ int ret;
+
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+
+ /* XXX Error for signal-on-any fence arrays */
+
+ child = array->fences;
+ nchild = array->num_fences;
+ GEM_BUG_ON(!nchild);
+ }
+
+ do {
+ fence = *child++;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ continue;
+
+ /*
+ * We don't squash repeated fence dependencies here as we
+ * want to run our callback in all cases.
+ */
+
+ if (dma_fence_is_i915(fence))
+ ret = __i915_request_await_execution(rq,
+ to_request(fence),
+ hook,
+ I915_FENCE_GFP);
+ else
+ ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
+ I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ } while (--nchild);
+
+ return 0;
+}
+
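The optional hook fires once the signaler has actually been submitted to hardware (immediately, if it already has), as opposed to when its fence signals. A hypothetical caller might use it to sample state that only becomes stable at submission time, such as the final engine of a virtual request:

    /* Hypothetical hook: runs as @signal reaches the hardware */
    static void note_signaler_engine(struct i915_request *rq,
                                     struct dma_fence *signal)
    {
            /* to_request(signal)->engine is now fixed and may be read */
    }

    err = i915_request_await_execution(rq, fence, note_signaler_engine);
    if (err)
            return err;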
/**
* i915_request_await_object - set this request to (async) wait upon a bo
* @to: request we are wishing to use
@@ -1100,8 +1153,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* precludes optimising to use semaphores serialisation of a single
* timeline across engines.
*/
- prev = i915_active_request_raw(&timeline->last_request,
- &rq->i915->drm.struct_mutex);
+ prev = rcu_dereference_protected(timeline->last_request.request, 1);
if (prev && !i915_request_completed(prev)) {
if (is_power_of_2(prev->engine->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
@@ -1122,6 +1174,11 @@ __i915_request_add_to_timeline(struct i915_request *rq)
list_add_tail(&rq->link, &timeline->requests);
spin_unlock_irq(&timeline->lock);
+ /*
+ * Make sure that no request gazumped us - if it was allocated after
+ * our i915_request_alloc() and called __i915_request_add() before
+ * us, the timeline will hold its seqno which is later than ours.
+ */
GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
__i915_active_request_set(&timeline->last_request, rq);
@@ -1133,36 +1190,23 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void i915_request_add(struct i915_request *request)
+struct i915_request *__i915_request_commit(struct i915_request *rq)
{
- struct intel_engine_cs *engine = request->engine;
- struct i915_timeline *timeline = request->timeline;
- struct intel_ring *ring = request->ring;
+ struct intel_engine_cs *engine = rq->engine;
+ struct intel_ring *ring = rq->ring;
struct i915_request *prev;
u32 *cs;
GEM_TRACE("%s fence %llx:%lld\n",
- engine->name, request->fence.context, request->fence.seqno);
-
- lockdep_assert_held(&request->timeline->mutex);
- lockdep_unpin_lock(&request->timeline->mutex, request->cookie);
-
- trace_i915_request_add(request);
-
- /*
- * Make sure that no request gazumped us - if it was allocated after
- * our i915_request_alloc() and called __i915_request_add() before
- * us, the timeline will hold its seqno which is later than ours.
- */
- GEM_BUG_ON(timeline->seqno != request->fence.seqno);
+ engine->name, rq->fence.context, rq->fence.seqno);
/*
* To ensure that this call will not fail, space for its emissions
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- GEM_BUG_ON(request->reserved_space > request->ring->space);
- request->reserved_space = 0;
+ GEM_BUG_ON(rq->reserved_space > ring->space);
+ rq->reserved_space = 0;
/*
* Record the position of the start of the breadcrumb so that
@@ -1170,17 +1214,16 @@ void i915_request_add(struct i915_request *request)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
- cs = intel_ring_begin(request, engine->emit_fini_breadcrumb_dw);
+ cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
GEM_BUG_ON(IS_ERR(cs));
- request->postfix = intel_ring_offset(request, cs);
+ rq->postfix = intel_ring_offset(rq, cs);
- prev = __i915_request_add_to_timeline(request);
+ prev = __i915_request_add_to_timeline(rq);
- list_add_tail(&request->ring_link, &ring->request_list);
- if (list_is_first(&request->ring_link, &ring->request_list))
- list_add(&ring->active_link, &request->i915->gt.active_rings);
- request->i915->gt.active_engines |= request->engine->mask;
- request->emitted_jiffies = jiffies;
+ list_add_tail(&rq->ring_link, &ring->request_list);
+ if (list_is_first(&rq->ring_link, &ring->request_list))
+ list_add(&ring->active_link, &rq->i915->gt.active_rings);
+ rq->emitted_jiffies = jiffies;
/*
* Let the backend know a new request has arrived that may need
@@ -1194,10 +1237,10 @@ void i915_request_add(struct i915_request *request)
* run at the earliest possible convenience.
*/
local_bh_disable();
- i915_sw_fence_commit(&request->semaphore);
+ i915_sw_fence_commit(&rq->semaphore);
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule) {
- struct i915_sched_attr attr = request->gem_context->sched;
+ struct i915_sched_attr attr = rq->gem_context->sched;
/*
* Boost actual workloads past semaphores!
@@ -1211,7 +1254,7 @@ void i915_request_add(struct i915_request *request)
* far in the distance past over useful work, we keep a history
* of any semaphore use along our dependency chain.
*/
- if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
attr.priority |= I915_PRIORITY_NOSEMAPHORE;
/*
@@ -1220,15 +1263,29 @@ void i915_request_add(struct i915_request *request)
* Allow interactive/synchronous clients to jump ahead of
* the bulk clients. (FQ_CODEL)
*/
- if (list_empty(&request->sched.signalers_list))
+ if (list_empty(&rq->sched.signalers_list))
attr.priority |= I915_PRIORITY_WAIT;
- engine->schedule(request, &attr);
+ engine->schedule(rq, &attr);
}
rcu_read_unlock();
- i915_sw_fence_commit(&request->submit);
+ i915_sw_fence_commit(&rq->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+ return prev;
+}
+
+void i915_request_add(struct i915_request *rq)
+{
+ struct i915_request *prev;
+
+ lockdep_assert_held(&rq->timeline->mutex);
+ lockdep_unpin_lock(&rq->timeline->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+
+ prev = __i915_request_commit(rq);
+
/*
* In typical scenarios, we do not expect the previous request on
* the timeline to be still tracked by timeline->last_request if it
@@ -1249,7 +1306,7 @@ void i915_request_add(struct i915_request *request)
if (prev && i915_request_completed(prev))
i915_request_retire_upto(prev);
- mutex_unlock(&request->timeline->mutex);
+ mutex_unlock(&rq->timeline->mutex);
}
static unsigned long local_clock_us(unsigned int *cpu)
@@ -1382,8 +1439,31 @@ long i915_request_wait(struct i915_request *rq,
trace_i915_request_wait_begin(rq, flags);
- /* Optimistic short spin before touching IRQs */
- if (__i915_spin_request(rq, state, 5))
+ /*
+ * Optimistic spin before touching IRQs.
+ *
+ * We may use a rather large value here to offset the penalty of
+ * switching away from the active task. Frequently, the client will
+ * wait upon an old swapbuffer to throttle itself to remain within a
+ * frame of the gpu. If the client is running in lockstep with the gpu,
+ * then it should not be waiting long at all, and a sleep now will incur
+ * extra scheduler latency in producing the next frame. To try to
+ * avoid adding the cost of enabling/disabling the interrupt to the
+ * short wait, we first spin to see if the request would have completed
+ * in the time taken to setup the interrupt.
+ *
+ * We need up to 5us to enable the irq, and up to 20us to hide the
+ * scheduler latency of a context switch, ignoring the secondary
+ * impacts from a context switch such as cache eviction.
+ *
+ * The scheme used for low-latency IO is called "hybrid interrupt
+ * polling". The suggestion there is to sleep until just before you
+ * expect to be woken by the device interrupt and then poll for its
+ * completion. That requires having a good predictor for the request
+ * duration, which we currently lack.
+ */
+ if (CONFIG_DRM_I915_SPIN_REQUEST &&
+ __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST))
goto out;
/*
@@ -1401,9 +1481,7 @@ long i915_request_wait(struct i915_request *rq,
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
gen6_rps_boost(rq);
- local_bh_disable(); /* suspend tasklets for reprioritisation */
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
- local_bh_enable(); /* kick tasklets en masse */
}
wait.tsk = current;
@@ -1437,21 +1515,20 @@ out:
return timeout;
}
-void i915_retire_requests(struct drm_i915_private *i915)
+bool i915_retire_requests(struct drm_i915_private *i915)
{
struct intel_ring *ring, *tmp;
lockdep_assert_held(&i915->drm.struct_mutex);
- if (!i915->gt.active_requests)
- return;
-
list_for_each_entry_safe(ring, tmp,
&i915->gt.active_rings, active_link) {
intel_ring_get(ring); /* last rq holds reference! */
ring_retire_requests(ring);
intel_ring_put(ring);
}
+
+ return !list_empty(&i915->gt.active_rings);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index a982664618c2..c9f7d07991c8 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,6 +28,8 @@
#include <linux/dma-fence.h>
#include <linux/lockdep.h>
+#include "gt/intel_engine_types.h"
+
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
@@ -156,6 +158,7 @@ struct i915_request {
*/
struct i915_sched_node sched;
struct i915_dependency dep;
+ intel_engine_mask_t execution_mask;
/*
* A convenience pointer to the current breadcrumb value stored in
@@ -240,8 +243,12 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
}
struct i915_request * __must_check
-i915_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
+__i915_request_create(struct intel_context *ce, gfp_t gfp);
+struct i915_request * __must_check
+i915_request_create(struct intel_context *ce);
+
+struct i915_request *__i915_request_commit(struct i915_request *request);
+
void i915_request_retire_upto(struct i915_request *rq);
static inline struct i915_request *
@@ -276,6 +283,10 @@ int i915_request_await_object(struct i915_request *to,
bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
struct dma_fence *fence);
+int i915_request_await_execution(struct i915_request *rq,
+ struct dma_fence *fence,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal));
void i915_request_add(struct i915_request *rq);
@@ -418,6 +429,6 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}
-void i915_retire_requests(struct drm_i915_private *i915);
+bool i915_retire_requests(struct drm_i915_private *i915);
#endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 108f52e1bf35..78ceb56d7801 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -150,29 +150,49 @@ sched_lock_engine(const struct i915_sched_node *node,
struct intel_engine_cs *locked,
struct sched_cache *cache)
{
- struct intel_engine_cs *engine = node_to_request(node)->engine;
+ const struct i915_request *rq = node_to_request(node);
+ struct intel_engine_cs *engine;
GEM_BUG_ON(!locked);
- if (engine != locked) {
+ /*
+ * Virtual engines complicate acquiring the engine timeline lock,
+ * as their rq->engine pointer is not stable until under that
+ * engine lock. The simple ploy we use is to take the lock then
+ * check that the rq still belongs to the newly locked engine.
+ */
+ while (locked != (engine = READ_ONCE(rq->engine))) {
spin_unlock(&locked->timeline.lock);
memset(cache, 0, sizeof(*cache));
spin_lock(&engine->timeline.lock);
+ locked = engine;
}
- return engine;
+ GEM_BUG_ON(locked != engine);
+ return locked;
}
-static bool inflight(const struct i915_request *rq,
- const struct intel_engine_cs *engine)
+static inline int rq_prio(const struct i915_request *rq)
{
- const struct i915_request *active;
+ return rq->sched.attr.priority | __NO_PREEMPTION;
+}
- if (!i915_request_is_active(rq))
- return false;
+static void kick_submission(struct intel_engine_cs *engine, int prio)
+{
+ const struct i915_request *inflight =
+ port_request(engine->execlists.port);
+
+ /*
+ * If we are already the currently executing context, don't
+ * bother evaluating if we should preempt ourselves, or if
+ * we expect nothing to change as a result of running the
+ * tasklet, i.e. we have not changed the priority queue
+ * sufficiently to oust the running context.
+ */
+ if (inflight && !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
+ return;
- active = port_request(engine->execlists.port);
- return active->hw_context == rq->hw_context;
+ tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void __i915_schedule(struct i915_sched_node *node,
@@ -189,10 +209,10 @@ static void __i915_schedule(struct i915_sched_node *node,
lockdep_assert_held(&schedule_lock);
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
- if (node_signaled(node))
+ if (prio <= READ_ONCE(node->attr.priority))
return;
- if (prio <= READ_ONCE(node->attr.priority))
+ if (node_signaled(node))
return;
stack.signaler = node;
@@ -261,6 +281,7 @@ static void __i915_schedule(struct i915_sched_node *node,
spin_lock(&engine->timeline.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
+ engine = sched_lock_engine(node, engine, &cache);
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
INIT_LIST_HEAD(&dep->dfs_link);
@@ -272,8 +293,11 @@ static void __i915_schedule(struct i915_sched_node *node,
if (prio <= node->attr.priority || node_signaled(node))
continue;
+ GEM_BUG_ON(node_to_request(node)->engine != engine);
+
node->attr.priority = prio;
if (!list_empty(&node->link)) {
+ GEM_BUG_ON(intel_engine_is_virtual(engine));
if (!cache.priolist)
cache.priolist =
i915_sched_lookup_priolist(engine,
@@ -297,15 +321,8 @@ static void __i915_schedule(struct i915_sched_node *node,
engine->execlists.queue_priority_hint = prio;
- /*
- * If we are already the currently executing context, don't
- * bother evaluating if we should preempt ourselves.
- */
- if (inflight(node_to_request(node), engine))
- continue;
-
/* Defer (tasklet) submission until after all of our updates. */
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ kick_submission(engine, prio);
}
spin_unlock(&engine->timeline.lock);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 07d243acf553..7eefccff39bf 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -52,4 +52,22 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
+static inline bool i915_scheduler_need_preempt(int prio, int active)
+{
+ /*
+ * Allow preemption of low -> normal -> high, but we do
+ * not allow low priority tasks to preempt other low priority
+ * tasks under the impression that latency for low priority
+ * tasks does not matter (as much as background throughput),
+ * so kiss.
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
+ */
+ return prio > max(I915_PRIORITY_NORMAL - 1, active);
+}
+
#endif /* _I915_SCHEDULER_H_ */
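Worked through with illustrative values (taking I915_PRIORITY_NORMAL == 0 and ignoring the __NO_PREEMPTION bits):

    i915_scheduler_need_preempt( 0,  0);  /* false: equal level, keep FIFO  */
    i915_scheduler_need_preempt(-1, -2);  /* false: low never preempts low  */
    i915_scheduler_need_preempt( 0, -1);  /* true:  normal ousts background */
    i915_scheduler_need_preempt( 1,  0);  /* true:  boosted ousts normal    */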
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 4f2b2eb7c3e5..3e309631bd0b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -9,8 +9,8 @@
#include <linux/list.h>
+#include "gt/intel_engine_types.h"
#include "i915_priolist_types.h"
-#include "intel_engine_types.h"
struct drm_i915_private;
struct i915_request;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 95f3dab1b229..581201bcb81a 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,6 +29,7 @@
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_fbc.h"
+#include "intel_gmbus.h"
static void i915_save_display(struct drm_i915_private *dev_priv)
{
@@ -144,7 +145,7 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_i2c_reset(dev_priv);
+ intel_gmbus_reset(dev_priv);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 41313005af42..3ef07b987d40 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -29,8 +29,11 @@
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
-#include "intel_drv.h"
+
#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_pm.h"
+#include "intel_sideband.h"
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
@@ -259,25 +262,23 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
intel_wakeref_t wakeref;
- int ret;
+ u32 freq;
wakeref = intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- u32 freq;
+ vlv_punit_get(dev_priv);
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
+ vlv_punit_put(dev_priv);
+
+ freq = (freq >> 8) & 0xff;
} else {
- ret = intel_gpu_freq(dev_priv,
- intel_get_cagf(dev_priv,
- I915_READ(GEN6_RPSTAT1)));
+ freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
}
- mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv, wakeref);
- return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
}
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
@@ -318,12 +319,12 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
if (val < rps->min_freq || val > rps->max_freq)
return -EINVAL;
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&rps->lock);
if (val != rps->boost_freq) {
rps->boost_freq = val;
boost = atomic_read(&rps->num_waiters);
}
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&rps->lock);
if (boost)
schedule_work(&rps->work);
@@ -364,17 +365,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
return ret;
wakeref = intel_runtime_pm_get(dev_priv);
-
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&rps->lock);
val = intel_freq_opcode(dev_priv, val);
-
if (val < rps->min_freq ||
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
- mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv, wakeref);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
if (val > rps->rp0_freq)
@@ -392,8 +390,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
* frequency request may be unchanged. */
ret = intel_set_rps(dev_priv, val);
- mutex_unlock(&dev_priv->pcu_lock);
-
+unlock:
+ mutex_unlock(&rps->lock);
intel_runtime_pm_put(dev_priv, wakeref);
return ret ?: count;
@@ -423,17 +421,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
return ret;
wakeref = intel_runtime_pm_get(dev_priv);
-
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&rps->lock);
val = intel_freq_opcode(dev_priv, val);
-
if (val < rps->min_freq ||
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
- mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv, wakeref);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
rps->min_freq_softlimit = val;
@@ -447,8 +442,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
* frequency request may be unchanged. */
ret = intel_set_rps(dev_priv, val);
- mutex_unlock(&dev_priv->pcu_lock);
-
+unlock:
+ mutex_unlock(&rps->lock);
intel_runtime_pm_put(dev_priv, wakeref);
return ret ?: count;
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
index 5256a0b5c5f7..1688705f4a2b 100644
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -26,6 +26,7 @@ struct i915_timeline {
spinlock_t lock;
#define TIMELINE_CLIENT 0 /* default subclass */
#define TIMELINE_ENGINE 1
+#define TIMELINE_VIRTUAL 2
struct mutex mutex; /* protects the flow of requests */
unsigned int pin_count;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 12893304c8f8..83b389e34b50 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -8,9 +8,11 @@
#include <drm/drm_drv.h>
+#include "gt/intel_engine.h"
+
#include "i915_drv.h"
+#include "i915_irq.h"
#include "intel_drv.h"
-#include "intel_ringbuffer.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 2dbe8933b50a..e52866084891 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -25,6 +25,12 @@
#ifndef __I915_UTILS_H
#define __I915_UTILS_H
+#include <linux/list.h>
+#include <linux/overflow.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
#if 0
@@ -73,6 +79,39 @@
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
+static inline bool
+__check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
+{
+ size_t sz;
+
+ if (check_mul_overflow(count, arr, &sz))
+ return false;
+
+ if (check_add_overflow(sz, base, &sz))
+ return false;
+
+ *size = sz;
+ return true;
+}
+
+/**
+ * check_struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ * @sz: Total size of structure and array
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @n @member elements, like struct_size() but reports
+ * whether it overflowed, and the resultant size in @sz.
+ *
+ * Return: false if the calculation overflowed.
+ */
+#define check_struct_size(p, member, n, sz) \
+ likely(__check_struct_size(sizeof(*(p)), \
+ sizeof(*(p)->member) + __must_be_array((p)->member), \
+ n, sz))
+
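A hedged usage sketch with hypothetical names, mirroring how struct_size() call sites look: validate the size of a structure plus a user-controlled trailing array before allocating it:

    struct exec_fences {
            unsigned int count;
            struct dma_fence *fences[];   /* hypothetical trailing array */
    } *arr;
    size_t size;

    if (!check_struct_size(arr, fences, user_count, &size))
            return -EINVAL;   /* n * sizeof(*fences) + base overflowed */

    arr = kmalloc(size, GFP_KERNEL);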
#define ptr_mask_bits(ptr, n) ({ \
unsigned long __v = (unsigned long)(ptr); \
(typeof(ptr))(__v & -BIT(n)); \
@@ -97,6 +136,8 @@
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
+#define struct_member(T, member) (((T *)0)->member)
+
#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)
#define fetch_and_zero(ptr) ({ \
@@ -113,7 +154,7 @@
*/
#define container_of_user(ptr, type, member) ({ \
void __user *__mptr = (void __user *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), struct_member(type, member)) && \
!__same_type(*(ptr), void), \
"pointer type mismatch in container_of()"); \
((type __user *)(__mptr - offsetof(type, member))); })
@@ -152,8 +193,6 @@ static inline u64 ptr_to_u64(const void *ptr)
__idx; \
})
-#include <linux/list.h>
-
static inline void __list_del_many(struct list_head *head,
struct list_head *first)
{
@@ -174,6 +213,158 @@ static inline void drain_delayed_work(struct delayed_work *dw)
} while (delayed_work_pending(dw));
}
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+ /* nsecs_to_jiffies64() does not guard against overflow */
+ if (NSEC_PER_SEC % HZ &&
+ div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+ return MAX_JIFFY_OFFSET;
+
+ return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+ unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+ /*
+ * Don't re-read the value of "jiffies" every time since it may change
+ * behind our back and break the math.
+ */
+ tmp_jiffies = jiffies;
+ target_jiffies = timestamp_jiffies +
+ msecs_to_jiffies_timeout(to_wait_ms);
+
+ if (time_after(target_jiffies, tmp_jiffies)) {
+ remaining_jiffies = target_jiffies - tmp_jiffies;
+ while (remaining_jiffies)
+ remaining_jiffies =
+ schedule_timeout_uninterruptible(remaining_jiffies);
+ }
+}
+
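For example (hypothetical field name, in the spirit of the panel power-sequencing users of this helper): record jiffies at power-off, then enforce the minimum off-time just before powering back on:

    /* event A: power off */
    intel_dp->last_power_off = jiffies;   /* hypothetical timestamp field */

    /* just before event B: guarantee at least 500ms has elapsed */
    wait_remaining_ms_from_jiffies(intel_dp->last_power_off, 500);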
+/**
+ * __wait_for - magic wait macro
+ *
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
+ */
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
+ break; \
+ } \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
+ } \
+ ret__; \
+})
+
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
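Typical usage is a sleeping poll on a condition, here waiting up to 10ms for a status bit (both register and bit are placeholders, not real i915 definitions):

    err = wait_for(I915_READ(MY_STATUS_REG) & MY_DONE_BIT, 10);
    if (err == -ETIMEDOUT)
            DRM_ERROR("hardware did not signal completion\n");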
+/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
+#endif
+
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+ int cpu, ret, timeout = (US) * 1000; \
+ u64 base; \
+ _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ cpu = smp_processor_id(); \
+ } \
+ base = local_clock(); \
+ for (;;) { \
+ u64 now = local_clock(); \
+ if (!(ATOMIC)) \
+ preempt_enable(); \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret = 0; \
+ break; \
+ } \
+ if (now - base >= timeout) { \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+ cpu_relax(); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ if (unlikely(cpu != smp_processor_id())) { \
+ timeout -= now - base; \
+ cpu = smp_processor_id(); \
+ base = local_clock(); \
+ } \
+ } \
+ } \
+ ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+ int ret__; \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ if ((US) > 10) \
+ ret__ = _wait_for((COND), (US), 10, 10); \
+ else \
+ ret__ = _wait_for_atomic((COND), (US), 0); \
+ ret__; \
+})
+
+#define wait_for_atomic_us(COND, US) \
+({ \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ BUILD_BUG_ON((US) > 50000); \
+ _wait_for_atomic((COND), (US), 1); \
+})
+
+#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+#define KBps(x) (1000 * (x))
+#define MBps(x) KBps(1000 * (x))
+#define GBps(x) ((u64)1000 * MBps((x)))
+
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 961268f66c63..cf405ffda045 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -22,11 +22,12 @@
*
*/
+#include "gt/intel_engine.h"
+
#include "i915_vma.h"
#include "i915_drv.h"
#include "i915_globals.h"
-#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"
#include <drm/drm_gem.h>
@@ -155,6 +156,9 @@ vma_create(struct drm_i915_gem_object *obj,
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
+ } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
+ vma->size = intel_remapped_info_size(&view->remapped);
+ vma->size <<= PAGE_SHIFT;
}
}
@@ -476,13 +480,6 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!vma->fence_size);
- /*
- * Explicitly disable for rotated VMA since the display does not
- * need the fence and the VMA is not accessible to other users.
- */
- if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
- return;
-
fenceable = (vma->node.size >= vma->fence_size &&
IS_ALIGNED(vma->node.start, vma->fence_alignment));
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 6eab70953a57..8543d2953cd1 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -277,8 +277,11 @@ i915_vma_compare(struct i915_vma *vma,
*/
BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
+ BUILD_BUG_ON(I915_GGTT_VIEW_ROTATED >= I915_GGTT_VIEW_REMAPPED);
BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
offsetof(typeof(*view), partial));
+ BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
+ offsetof(typeof(*view), remapped));
return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 9d962ea1e635..1e240ad665b5 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -28,6 +28,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
+#include "intel_atomic.h"
+#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dsi.h"
@@ -363,30 +365,10 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
- u32 tmp;
- u32 lane_mask;
-
- switch (intel_dsi->lane_count) {
- case 1:
- lane_mask = PWR_DOWN_LN_3_1_0;
- break;
- case 2:
- lane_mask = PWR_DOWN_LN_3_1;
- break;
- case 3:
- lane_mask = PWR_DOWN_LN_3;
- break;
- case 4:
- default:
- lane_mask = PWR_UP_ALL_LANES;
- break;
- }
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_CL_DW10(port));
- tmp &= ~PWR_DOWN_LN_MASK;
- I915_WRITE(ICL_PORT_CL_DW10(port), tmp | lane_mask);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_combo_phy_power_up_lanes(dev_priv, port, true,
+ intel_dsi->lane_count, false);
}
static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
@@ -1193,17 +1175,51 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
gen11_dsi_disable_io_power(encoder);
}
+static void gen11_dsi_get_timings(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (intel_dsi->dual_link) {
+ adjusted_mode->crtc_hdisplay *= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ adjusted_mode->crtc_hdisplay -=
+ intel_dsi->pixel_overlap;
+ adjusted_mode->crtc_htotal *= 2;
+ }
+ adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
+
+ if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+ if (intel_dsi->dual_link) {
+ adjusted_mode->crtc_hsync_start *= 2;
+ adjusted_mode->crtc_hsync_end *= 2;
+ }
+ }
+ adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
+}
+
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
pipe_config->port_clock =
cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
+
pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ if (intel_dsi->dual_link)
+ pipe_config->base.adjusted_mode.crtc_clock *= 2;
+
+ gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+ pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
}
static int gen11_dsi_compute_config(struct intel_encoder *encoder,
@@ -1219,6 +1235,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 9d142d038a7d..3456d33feb46 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -4,9 +4,12 @@
*
* _DSM related code stolen from nouveau_acpi.c.
*/
+
#include <linux/pci.h>
#include <linux/acpi.h>
+
#include "i915_drv.h"
+#include "intel_acpi.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
diff --git a/drivers/gpu/drm/i915/intel_acpi.h b/drivers/gpu/drm/i915/intel_acpi.h
new file mode 100644
index 000000000000..1c576b3fb712
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_acpi.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ACPI_H__
+#define __INTEL_ACPI_H__
+
+#ifdef CONFIG_ACPI
+void intel_register_dsm_handler(void);
+void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
+#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 8c8fae32ec50..ab40448a19d5 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -34,6 +34,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include "intel_atomic.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
#include "intel_sprite.h"
@@ -105,12 +106,14 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
}
int intel_digital_connector_atomic_check(struct drm_connector *conn,
- struct drm_connector_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *new_state =
+ drm_atomic_get_new_connector_state(state, conn);
struct intel_digital_connector_state *new_conn_state =
to_intel_digital_connector_state(new_state);
struct drm_connector_state *old_state =
- drm_atomic_get_old_connector_state(new_state->state, conn);
+ drm_atomic_get_old_connector_state(state, conn);
struct intel_digital_connector_state *old_conn_state =
to_intel_digital_connector_state(old_state);
struct drm_crtc_state *crtc_state;
@@ -120,7 +123,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
if (!new_state->crtc)
return 0;
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
/*
* These properties are handled by fastset, and might not end
@@ -411,3 +414,15 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
drm_atomic_state_default_clear(&state->base);
state->dpll_set = state->modeset = false;
}
+
+struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
+ if (IS_ERR(crtc_state))
+ return ERR_CAST(crtc_state);
+
+ return to_intel_crtc_state(crtc_state);
+}
diff --git a/drivers/gpu/drm/i915/intel_atomic.h b/drivers/gpu/drm/i915/intel_atomic.h
new file mode 100644
index 000000000000..58065d3161a3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ATOMIC_H__
+#define __INTEL_ATOMIC_H__
+
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_device;
+struct drm_i915_private;
+struct drm_property;
+struct intel_crtc;
+struct intel_crtc_state;
+
+int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ u64 *val);
+int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ u64 val);
+int intel_digital_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state);
+struct drm_connector_state *
+intel_digital_connector_duplicate_state(struct drm_connector *connector);
+
+struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
+void intel_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_clear(struct drm_atomic_state *state);
+
+struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc);
+
+int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index bca4cc025d3d..840daff12246 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -26,11 +26,11 @@
#include <drm/drm_edid.h>
#include <drm/i915_component.h>
-#include <drm/intel_lpe_audio.h>
#include "i915_drv.h"
#include "intel_audio.h"
#include "intel_drv.h"
+#include "intel_lpe_audio.h"
/**
* DOC: High Definition Audio over HDMI and Display Port
@@ -319,9 +319,8 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
const struct dp_aud_n_m *nm;
int rate;
u32 tmp;
@@ -333,7 +332,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
else
DRM_DEBUG_KMS("using automatic Maud, Naud\n");
- tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -345,9 +344,9 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_CONFIG_N_PROG_ENABLE;
}
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
- tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_CONFIG_M_MASK;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
@@ -358,7 +357,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_M_CTS_M_PROG_ENABLE;
}
- I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -367,15 +366,14 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
int n, rate;
u32 tmp;
rate = acomp ? acomp->aud_sample_rate[port] : 0;
- tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -392,16 +390,16 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
DRM_DEBUG_KMS("using automatic N\n");
}
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
/*
* Let's disable "Enable CTS or M Prog bit"
* and let HW calculate the value
*/
- tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe));
+ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp);
+ I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -419,28 +417,28 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 tmp;
- DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
+ DRM_DEBUG_KMS("Disable audio codec on transcoder %s\n",
+ transcoder_name(cpu_transcoder));
mutex_lock(&dev_priv->av_mutex);
/* Disable timestamps */
- tmp = I915_READ(HSW_AUD_CFG(pipe));
+ tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
- I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
/* Invalidate ELD */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp &= ~AUDIO_ELD_VALID(pipe);
- tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
+ tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
+ tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
mutex_unlock(&dev_priv->av_mutex);
@@ -451,22 +449,21 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_connector *connector = conn_state->connector;
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const u8 *eld = connector->eld;
u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
- pipe_name(pipe), drm_eld_size(eld));
+ DRM_DEBUG_KMS("Enable audio codec on transcoder %s, %u bytes ELD\n",
+ transcoder_name(cpu_transcoder), drm_eld_size(eld));
mutex_lock(&dev_priv->av_mutex);
/* Enable audio presence detect, invalidate ELD */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp |= AUDIO_OUTPUT_ENABLE(pipe);
- tmp &= ~AUDIO_ELD_VALID(pipe);
+ tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
+ tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/*
@@ -477,18 +474,18 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
*/
/* Reset ELD write address */
- tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(pipe));
+ tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
tmp &= ~IBX_ELD_ADDRESS_MASK;
- I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
+ I915_WRITE(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((const u32 *)eld + i));
+ I915_WRITE(HSW_AUD_EDID_DATA(cpu_transcoder), *((const u32 *)eld + i));
/* ELD valid */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- tmp |= AUDIO_ELD_VALID(pipe);
+ tmp |= AUDIO_ELD_VALID(cpu_transcoder);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
@@ -644,8 +641,10 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
+ /* FIXME precompute the ELD in .compute_config() */
if (!connector->eld[0])
- return;
+ DRM_DEBUG_KMS("Bogus ELD on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
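
A note on the intel_audio.c hunks above: the HSW_AUD_* registers are now indexed by CPU transcoder rather than by pipe, matching how the audio hardware is wired on HSW+. The register macros boil down to base-plus-stride indexing; a minimal sketch with hypothetical offsets (not the real i915 values):

        #include <stdint.h>

        /* Hypothetical MMIO layout: one audio config register per
         * transcoder, evenly spaced. HSW_AUD_CFG() is assumed to work
         * the same way, just with the real hardware offsets. */
        #define AUD_CFG_BASE    0x65000u
        #define AUD_CFG_STRIDE  0x100u

        static inline uint32_t aud_cfg_offset(unsigned int cpu_transcoder)
        {
                return AUD_CFG_BASE + cpu_transcoder * AUD_CFG_STRIDE;
        }
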
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1dc8d03ff127..a0b708f7f384 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -27,7 +27,9 @@
#include <drm/drm_dp_helper.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_gmbus.h"
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 7e3545f65257..7bac53f219e1 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -30,6 +30,12 @@
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+
enum intel_backlight_type {
INTEL_BACKLIGHT_PMIC,
INTEL_BACKLIGHT_LPSS,
@@ -220,4 +226,19 @@ struct mipi_pps_data {
u16 panel_power_cycle_delay;
} __packed;
+void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
+bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+ enum port port);
+bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+ enum port port);
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
+
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index ae40a8679314..78d9f619956c 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -23,6 +23,7 @@
#include "intel_cdclk.h"
#include "intel_drv.h"
+#include "intel_sideband.h"
/**
* DOC: CDCLK / RAWCLK
@@ -464,14 +465,18 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
{
u32 val;
+ vlv_iosf_sb_get(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+
cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL,
cdclk_state->vco);
- mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- mutex_unlock(&dev_priv->pcu_lock);
+
+ vlv_iosf_sb_put(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
if (IS_VALLEYVIEW(dev_priv))
cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
@@ -545,7 +550,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_iosf_sb_get(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) |
+ BIT(VLV_IOSF_SB_BUNIT) |
+ BIT(VLV_IOSF_SB_PUNIT));
+
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
@@ -555,9 +564,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
}
- mutex_unlock(&dev_priv->pcu_lock);
-
- mutex_lock(&dev_priv->sb_lock);
if (cdclk == 400000) {
u32 divider;
@@ -591,7 +597,10 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
val |= 3000 / 250; /* 3.0 usec */
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_iosf_sb_put(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) |
+ BIT(VLV_IOSF_SB_BUNIT) |
+ BIT(VLV_IOSF_SB_PUNIT));
intel_update_cdclk(dev_priv);
@@ -627,7 +636,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
@@ -637,7 +646,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
}
- mutex_unlock(&dev_priv->pcu_lock);
+
+ vlv_punit_put(dev_priv);
intel_update_cdclk(dev_priv);
@@ -716,10 +726,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv,
BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("failed to inform pcode about cdclk change\n");
return;
@@ -768,10 +776,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
- mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_state->voltage_level);
- mutex_unlock(&dev_priv->pcu_lock);
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -1010,12 +1016,10 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
*/
WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
- mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
@@ -1079,10 +1083,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
- mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
cdclk_state->voltage_level);
- mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
}
@@ -1379,12 +1381,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* requires us to wait up to 150usec, but that leads to timeouts;
* the 2ms used here is based on experiment.
*/
- mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write_timeout(dev_priv,
HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000, 150, 2);
- mutex_unlock(&dev_priv->pcu_lock);
-
if (ret) {
DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
ret, cdclk);
@@ -1414,7 +1413,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
if (pipe != INVALID_PIPE)
intel_wait_for_vblank(dev_priv, pipe);
- mutex_lock(&dev_priv->pcu_lock);
/*
* The timeout isn't specified, the 2ms used here is based on
* experiment.
@@ -1424,8 +1422,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
ret = sandybridge_pcode_write_timeout(dev_priv,
HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_state->voltage_level, 150, 2);
- mutex_unlock(&dev_priv->pcu_lock);
-
if (ret) {
DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
ret, cdclk);
@@ -1648,12 +1644,10 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
u32 val, divider;
int ret;
- mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
@@ -1692,10 +1686,8 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
intel_wait_for_vblank(dev_priv, pipe);
/* inform PCU of the change */
- mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
cdclk_state->voltage_level);
- mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
@@ -1834,12 +1826,10 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
unsigned int vco = cdclk_state->vco;
int ret;
- mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
@@ -1861,10 +1851,8 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
skl_cdclk_decimal(cdclk));
- mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
cdclk_state->voltage_level);
- mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
@@ -2277,6 +2265,15 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
IS_VALLEYVIEW(dev_priv))
min_cdclk = max(320000, min_cdclk);
+ /*
+ * On Geminilake, once the CDCLK gets as low as 79200 the
+ * picture becomes unstable, even though the values are
+ * correct for the DSI PLL and DE PLL.
+ */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+ IS_GEMINILAKE(dev_priv))
+ min_cdclk = max(158400, min_cdclk);
+
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
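
The recurring pattern in the intel_cdclk.c hunks is replacing the pcu_lock/sb_lock mutexes with vlv_iosf_sb_get()/vlv_iosf_sb_put() calls that take a bitmask of sideband units, so CCK, BUNIT and PUNIT traffic can share one critical section. A hedged sketch of the shape of that API (names and types here are illustrative, not the i915 ones):

        #include <stdint.h>

        enum sb_unit { SB_CCK, SB_BUNIT, SB_PUNIT };

        /* Acquire every sideband unit named in 'mask' up front,
         * do all the accesses, then release them together. */
        static void sb_get(uint32_t mask) { (void)mask; /* lock the bus */ }
        static void sb_put(uint32_t mask) { (void)mask; /* unlock it */ }

        static void set_cdclk_sketch(void)
        {
                uint32_t mask = 1u << SB_CCK | 1u << SB_BUNIT | 1u << SB_PUNIT;

                sb_get(mask);
                /* punit, cck and bunit accesses all happen here,
                 * under a single acquisition */
                sb_put(mask);
        }
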
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 9093daabc290..962db1236970 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -784,56 +784,78 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
}
}
-static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
+static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->green, 14) << 16 |
+ drm_color_lut_extract(color->blue, 14);
+}
+
+static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 14);
+}
+
+static void chv_load_cgm_degamma(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- cherryview_load_csc_matrix(crtc_state);
-
- if (crtc_state_is_legacy_gamma(crtc_state)) {
- i9xx_load_luts(crtc_state);
- return;
+ for (i = 0; i < lut_size; i++) {
+ I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0),
+ chv_cgm_degamma_ldw(&lut[i]));
+ I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1),
+ chv_cgm_degamma_udw(&lut[i]));
}
+}
- if (degamma_lut) {
- const struct drm_color_lut *lut = degamma_lut->data;
- int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+static u32 chv_cgm_gamma_ldw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->green, 10) << 16 |
+ drm_color_lut_extract(color->blue, 10);
+}
- for (i = 0; i < lut_size; i++) {
- u32 word0, word1;
+static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10);
+}
- /* Write LUT in U0.14 format. */
- word0 =
- (drm_color_lut_extract(lut[i].green, 14) << 16) |
- drm_color_lut_extract(lut[i].blue, 14);
- word1 = drm_color_lut_extract(lut[i].red, 14);
+static void chv_load_cgm_gamma(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
- I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0), word0);
- I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1), word1);
- }
+ for (i = 0; i < lut_size; i++) {
+ I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0),
+ chv_cgm_gamma_ldw(&lut[i]));
+ I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1),
+ chv_cgm_gamma_udw(&lut[i]));
}
+}
- if (gamma_lut) {
- const struct drm_color_lut *lut = gamma_lut->data;
- int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
-
- for (i = 0; i < lut_size; i++) {
- u32 word0, word1;
+static void chv_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
- /* Write LUT in U0.10 format. */
- word0 =
- (drm_color_lut_extract(lut[i].green, 10) << 16) |
- drm_color_lut_extract(lut[i].blue, 10);
- word1 = drm_color_lut_extract(lut[i].red, 10);
+ cherryview_load_csc_matrix(crtc_state);
- I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0), word0);
- I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1);
- }
+ if (crtc_state_is_legacy_gamma(crtc_state)) {
+ i9xx_load_luts(crtc_state);
+ return;
}
+
+ if (degamma_lut)
+ chv_load_cgm_degamma(crtc, degamma_lut);
+
+ if (gamma_lut)
+ chv_load_cgm_gamma(crtc, gamma_lut);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -1232,7 +1254,7 @@ void intel_color_init(struct intel_crtc *crtc)
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.color_check = chv_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
- dev_priv->display.load_luts = cherryview_load_luts;
+ dev_priv->display.load_luts = chv_load_luts;
} else if (INTEL_GEN(dev_priv) >= 4) {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
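
The intel_color.c refactor splits the CHV CGM LUT programming into per-entry packing helpers: degamma entries are written in U0.14 and gamma entries in U0.10, with green and blue packed into the low dword and red alone in the high dword. A standalone sketch of the packing, assuming drm_color_lut_extract() rounds a 16-bit channel to the requested precision:

        #include <stdint.h>

        /* Reduce a 16-bit channel to 'bits' bits with round-to-nearest
         * (the contract assumed for drm_color_lut_extract()). */
        static uint32_t lut_extract(uint16_t v, unsigned int bits)
        {
                uint32_t val = v;

                if (bits < 16) {
                        val += 1u << (16 - bits - 1);
                        if (val > 0xffff)
                                val = 0xffff;
                }
                return val >> (16 - bits);
        }

        /* CGM gamma low dword: green in bits 25:16, blue in 9:0, U0.10. */
        static uint32_t cgm_gamma_ldw(uint16_t green, uint16_t blue)
        {
                return lut_extract(green, 10) << 16 | lut_extract(blue, 10);
        }
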
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
index 2bf4359d7e41..19a9333b727a 100644
--- a/drivers/gpu/drm/i915/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -3,6 +3,7 @@
* Copyright © 2018 Intel Corporation
*/
+#include "intel_combo_phy.h"
#include "intel_drv.h"
#define for_each_combo_port(__dev_priv, __port) \
@@ -147,7 +148,7 @@ static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
return ret;
}
-void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
+static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -167,7 +168,7 @@ void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
I915_WRITE(CNL_PORT_CL1CM_DW5, val);
}
-void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -203,7 +204,59 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
return ret;
}
-void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+ enum port port, bool is_dsi,
+ int lane_count, bool lane_reversal)
+{
+ u8 lane_mask;
+ u32 val;
+
+ if (is_dsi) {
+ WARN_ON(lane_reversal);
+
+ switch (lane_count) {
+ case 1:
+ lane_mask = PWR_DOWN_LN_3_1_0;
+ break;
+ case 2:
+ lane_mask = PWR_DOWN_LN_3_1;
+ break;
+ case 3:
+ lane_mask = PWR_DOWN_LN_3;
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ /* fall-through */
+ case 4:
+ lane_mask = PWR_UP_ALL_LANES;
+ break;
+ }
+ } else {
+ switch (lane_count) {
+ case 1:
+ lane_mask = lane_reversal ? PWR_DOWN_LN_2_1_0 :
+ PWR_DOWN_LN_3_2_1;
+ break;
+ case 2:
+ lane_mask = lane_reversal ? PWR_DOWN_LN_1_0 :
+ PWR_DOWN_LN_3_2;
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ /* fall-through */
+ case 4:
+ lane_mask = PWR_UP_ALL_LANES;
+ break;
+ }
+ }
+
+ val = I915_READ(ICL_PORT_CL_DW10(port));
+ val &= ~PWR_DOWN_LN_MASK;
+ val |= lane_mask << PWR_DOWN_LN_SHIFT;
+ I915_WRITE(ICL_PORT_CL_DW10(port), val);
+}
+
+static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
{
enum port port;
@@ -232,7 +285,7 @@ void icl_combo_phys_init(struct drm_i915_private *dev_priv)
}
}
-void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
{
enum port port;
@@ -253,3 +306,19 @@ void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
I915_WRITE(ICL_PORT_COMP_DW0(port), val);
}
}
+
+void intel_combo_phy_init(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ icl_combo_phys_init(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_combo_phys_init(i915);
+}
+
+void intel_combo_phy_uninit(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ icl_combo_phys_uninit(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_combo_phys_uninit(i915);
+}
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.h b/drivers/gpu/drm/i915/intel_combo_phy.h
new file mode 100644
index 000000000000..e6e195a83b19
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_combo_phy.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_COMBO_PHY_H__
+#define __INTEL_COMBO_PHY_H__
+
+#include <linux/types.h>
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+
+void intel_combo_phy_init(struct drm_i915_private *dev_priv);
+void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
+void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+ enum port port, bool is_dsi,
+ int lane_count, bool lane_reversal);
+
+#endif /* __INTEL_COMBO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h
deleted file mode 100644
index ebc861b1a49e..000000000000
--- a/drivers/gpu/drm/i915/intel_context.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __INTEL_CONTEXT_H__
-#define __INTEL_CONTEXT_H__
-
-#include <linux/lockdep.h>
-
-#include "intel_context_types.h"
-#include "intel_engine_types.h"
-
-struct intel_context *intel_context_alloc(void);
-void intel_context_free(struct intel_context *ce);
-
-void intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
-
-/**
- * intel_context_lookup - Find the matching HW context for this (ctx, engine)
- * @ctx - the parent GEM context
- * @engine - the target HW engine
- *
- * May return NULL if the HW context hasn't been instantiated (i.e. unused).
- */
-struct intel_context *
-intel_context_lookup(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
-
-/**
- * intel_context_pin_lock - Stabilises the 'pinned' status of the HW context
- * @ctx - the parent GEM context
- * @engine - the target HW engine
- *
- * Acquire a lock on the pinned status of the HW context, such that the context
- * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
- * intel_context_is_pinned() remains stable.
- */
-struct intel_context *
-intel_context_pin_lock(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
-
-static inline bool
-intel_context_is_pinned(struct intel_context *ce)
-{
- return atomic_read(&ce->pin_count);
-}
-
-static inline void intel_context_pin_unlock(struct intel_context *ce)
-__releases(ce->pin_mutex)
-{
- mutex_unlock(&ce->pin_mutex);
-}
-
-struct intel_context *
-__intel_context_insert(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce);
-void
-__intel_context_remove(struct intel_context *ce);
-
-struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
-
-static inline void __intel_context_pin(struct intel_context *ce)
-{
- GEM_BUG_ON(!intel_context_is_pinned(ce));
- atomic_inc(&ce->pin_count);
-}
-
-void intel_context_unpin(struct intel_context *ce);
-
-static inline struct intel_context *intel_context_get(struct intel_context *ce)
-{
- kref_get(&ce->ref);
- return ce;
-}
-
-static inline void intel_context_put(struct intel_context *ce)
-{
- kref_put(&ce->ref, ce->ops->destroy);
-}
-
-#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b665c370111b..bb56518576a1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,6 +39,9 @@
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_drv.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
+#include "intel_hotplug.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 96618af47088..bf0eebd385b9 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -547,8 +547,6 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (csr->fw_path == NULL) {
DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
- WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
-
return;
}
diff --git a/drivers/gpu/drm/i915/intel_csr.h b/drivers/gpu/drm/i915/intel_csr.h
index 17a32c1e8a35..03c64f8af7ab 100644
--- a/drivers/gpu/drm/i915/intel_csr.h
+++ b/drivers/gpu/drm/i915/intel_csr.h
@@ -8,6 +8,10 @@
struct drm_i915_private;
+#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
+#define CSR_VERSION_MAJOR(version) ((version) >> 16)
+#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
+
void intel_csr_ucode_init(struct drm_i915_private *i915);
void intel_csr_load_program(struct drm_i915_private *i915);
void intel_csr_ucode_fini(struct drm_i915_private *i915);
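
The CSR_VERSION macros moved into this header pack a firmware major/minor pair into a single u32, major in the high 16 bits. A quick self-checking example using the macros exactly as defined above:

        #include <assert.h>

        #define CSR_VERSION(major, minor)       ((major) << 16 | (minor))
        #define CSR_VERSION_MAJOR(version)      ((version) >> 16)
        #define CSR_VERSION_MINOR(version)      ((version) & 0xffff)

        int main(void)
        {
                assert(CSR_VERSION(1, 7) == 0x00010007);
                assert(CSR_VERSION_MAJOR(CSR_VERSION(1, 7)) == 1);
                assert(CSR_VERSION_MINOR(CSR_VERSION(1, 7)) == 7);
                return 0;
        }
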
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f181c26f62fd..df06e5bb4764 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -29,16 +29,23 @@
#include "i915_drv.h"
#include "intel_audio.h"
+#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
+#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_vdsc.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -1450,7 +1457,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
else
dotclock = pipe_config->port_clock;
- if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ !intel_crtc_has_dp_encoder(pipe_config))
dotclock *= 2;
if (pipe_config->pixel_multiplier)
@@ -1710,6 +1718,14 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
*/
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
+ /*
+ * As per DP 1.4a spec, section 2.2.4.3 [MSA Field for Indication
+ * of Color Encoding Format and Content Color Gamut], while sending
+ * YCBCR 420 signals we should program the MSA MISC1 fields, which
+ * indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ temp |= TRANS_MSA_USE_VSC_SDP;
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
@@ -1772,9 +1788,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
- if (IS_HASWELL(dev_priv) &&
- (crtc_state->pch_pfit.enabled ||
- crtc_state->pch_pfit.force_thru))
+ if (crtc_state->pch_pfit.force_thru)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -3111,6 +3125,15 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
+ if (intel_port_is_combophy(dev_priv, port)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(dev_priv, port, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -3375,6 +3398,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_edp_backlight_on(crtc_state, conn_state);
intel_psr_enable(intel_dp, crtc_state);
+ intel_dp_ycbcr_420_enable(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
@@ -3844,6 +3868,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
@@ -3858,6 +3883,12 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
+ if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ pipe_config->cpu_transcoder == TRANSCODER_EDP)
+ pipe_config->pch_pfit.force_thru =
+ pipe_config->pch_pfit.enabled ||
+ pipe_config->crc_enabled;
+
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -3865,7 +3896,6 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
return 0;
-
}
static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
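
One subtlety in the intel_ddi.c hunks: ddi_dotclock_get() now doubles the dotclock for YCbCr 4:2:0 output only when there is no DP encoder, since HDMI carries two 4:2:0 pixels per link clock while DP does not halve the clock this way. Restated as a tiny sketch (not the driver function itself):

        /* Dotclock rule sketch: clocks in kHz, flags as plain ints. */
        static int dotclock_sketch(int port_clock, int is_ycbcr420, int is_dp)
        {
                int dotclock = port_clock;

                /* e.g. 4k60 YCbCr 4:2:0 over HDMI: 297000 kHz on the
                 * wire, 594000 kHz CRTC dotclock. */
                if (is_ycbcr420 && !is_dp)
                        dotclock *= 2;

                return dotclock;
        }
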
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 0e579f158016..5a2e17d6146b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -27,7 +27,10 @@
#include <uapi/drm/i915_drm.h>
-#include "intel_engine_types.h"
+#include "gt/intel_engine_types.h"
+#include "gt/intel_context_types.h"
+#include "gt/intel_sseu.h"
+
#include "intel_display.h"
struct drm_printer;
@@ -118,6 +121,7 @@ enum intel_ppgtt_type {
func(has_pooled_eu); \
func(has_rc6); \
func(has_rc6p); \
+ func(has_rps); \
func(has_runtime_pm); \
func(has_snoop); \
func(has_coherent_ggtt); \
@@ -139,33 +143,6 @@ enum intel_ppgtt_type {
func(overlay_needs_physical); \
func(supports_tv);
-#define GEN_MAX_SLICES (6) /* CNL upper bound */
-#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
-
-struct sseu_dev_info {
- u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SLICES];
- u16 eu_total;
- u8 eu_per_subslice;
- u8 min_eu_in_pool;
- /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
- u8 subslice_7eu[3];
- u8 has_slice_pg:1;
- u8 has_subslice_pg:1;
- u8 has_eu_pg:1;
-
- /* Topology fields */
- u8 max_slices;
- u8 max_subslices;
- u8 max_eus_per_subslice;
-
- /* We don't have more than 8 eus per subslice at the moment and as we
- * store eus enabled using bits, no need to multiply by eus per
- * subslice.
- */
- u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
-};
-
struct intel_device_info {
u16 gen_mask;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b69440cf41ea..d97a849d4571 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -46,8 +46,9 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
-#include "i915_reset.h"
#include "i915_trace.h"
+#include "intel_acpi.h"
+#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_cdclk.h"
@@ -59,16 +60,23 @@
#include "intel_dvo.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
+#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
+#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
+#include "intel_hotplug.h"
#include "intel_lvds.h"
+#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
+#include "intel_quirks.h"
#include "intel_sdvo.h"
+#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tv.h"
+#include "intel_vdsc.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -120,7 +128,7 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
+static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -153,10 +161,8 @@ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
/* Obtain SKU information */
- mutex_lock(&dev_priv->sb_lock);
hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
- mutex_unlock(&dev_priv->sb_lock);
return vco_freq[hpll_freq] * 1000;
}
@@ -167,10 +173,7 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
u32 val;
int divider;
- mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, reg);
- mutex_unlock(&dev_priv->sb_lock);
-
divider = val & CCK_FREQUENCY_VALUES;
WARN((val & CCK_FREQUENCY_STATUS) !=
@@ -183,11 +186,18 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg)
{
+ int hpll;
+
+ vlv_cck_get(dev_priv);
+
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
- return vlv_get_cck_clock(dev_priv, name, reg,
- dev_priv->hpll_freq);
+ hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
+
+ vlv_cck_put(dev_priv);
+
+ return hpll;
}
static void intel_update_czclk(struct drm_i915_private *dev_priv)
@@ -476,6 +486,7 @@ static const struct intel_limit intel_limits_bxt = {
.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
+/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
@@ -489,6 +500,19 @@ skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
+/* Wa_2006604312:icl */
+static void
+icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool enable)
+{
+ if (enable)
+ I915_WRITE(CLKGATE_DIS_PSL(pipe),
+ I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+ else
+ I915_WRITE(CLKGATE_DIS_PSL(pipe),
+ I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+}
+
static bool
needs_modeset(const struct drm_crtc_state *state)
{
@@ -551,7 +575,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return 0;
- clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
+ clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
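
The (u64) casts above become mul_u32_u32(), which makes the 32x32->64 widening multiply explicit; on some 32-bit targets that lets a single widening multiply instruction be used instead of a full 64-bit multiply. The generic fallback is assumed to be just the cast:

        #include <stdint.h>

        /* Promote one operand so the product is computed at 64-bit
         * width and never truncated to 32 bits. */
        static inline uint64_t mul_u32_u32(uint32_t a, uint32_t b)
        {
                return (uint64_t)a * b;
        }
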
@@ -936,8 +960,8 @@ chv_find_best_dpll(const struct intel_limit *limit,
clock.p = clock.p1 * clock.p2;
- m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
- clock.n) << 22, refclk * clock.m1);
+ m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
+ refclk * clock.m1);
if (m2 > INT_MAX/clock.m1)
continue;
@@ -1080,9 +1104,9 @@ void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
u32 val;
bool cur_state;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_cck_get(dev_priv);
val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_cck_put(dev_priv);
cur_state = val & DSI_PLL_VCO_EN;
I915_STATE_WARN(cur_state != state,
@@ -1392,14 +1416,14 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 tmp;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Enable back the 10bit clock to display controller */
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
tmp |= DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
/*
* Need to wait > 100ns between dclkp clock enable bit and PLL enable.
@@ -1556,14 +1580,14 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Disable 10bit clock to display controller */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
val &= ~DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
@@ -1891,7 +1915,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
- return cpp;
+ return intel_tile_size(dev_priv);
case I915_FORMAT_MOD_X_TILED:
if (IS_GEN(dev_priv, 2))
return 128;
@@ -1934,11 +1958,8 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
- return 1;
- else
- return intel_tile_size(to_i915(fb->dev)) /
- intel_tile_width_bytes(fb, color_plane);
+ return intel_tile_size(to_i915(fb->dev)) /
+ intel_tile_width_bytes(fb, color_plane);
}
/* Return the tile dimensions in pixel units */
@@ -1973,6 +1994,17 @@ unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info
return size;
}
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
+{
+ unsigned int size = 0;
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
+ size += rem_info->plane[i].width * rem_info->plane[i].height;
+
+ return size;
+}
+
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
const struct drm_framebuffer *fb,
@@ -2042,7 +2074,9 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
+ return INTEL_GEN(dev_priv) < 4 ||
+ (plane->has_fbc &&
+ plane_state->view.type == I915_GGTT_VIEW_NORMAL);
}
struct i915_vma *
@@ -2183,16 +2217,8 @@ void intel_add_fb_offsets(int *x, int *y,
int color_plane)
{
- const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
- unsigned int rotation = state->base.rotation;
-
- if (drm_rotation_90_or_270(rotation)) {
- *x += intel_fb->rotated[color_plane].x;
- *y += intel_fb->rotated[color_plane].y;
- } else {
- *x += intel_fb->normal[color_plane].x;
- *y += intel_fb->normal[color_plane].y;
- }
+ *x += state->color_plane[color_plane].x;
+ *y += state->color_plane[color_plane].y;
}
static u32 intel_adjust_tile_offset(int *x, int *y,
@@ -2476,6 +2502,134 @@ bool is_ccs_modifier(u64 modifier)
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}
+u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier)
+{
+ struct intel_crtc *crtc;
+ struct intel_plane *plane;
+
+ /*
+ * We assume the primary plane for pipe A has
+ * the highest stride limits of them all.
+ */
+ crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ plane = to_intel_plane(crtc->base.primary);
+
+ return plane->max_stride(plane, pixel_format, modifier,
+ DRM_MODE_ROTATE_0);
+}
+
+static
+u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier)
+{
+ /*
+ * Arbitrary limit for gen4+ chosen to match the
+ * render engine max stride.
+ *
+ * The new CCS hash mode makes remapping impossible
+ */
+ if (!is_ccs_modifier(modifier)) {
+ if (INTEL_GEN(dev_priv) >= 7)
+ return 256*1024;
+ else if (INTEL_GEN(dev_priv) >= 4)
+ return 128*1024;
+ }
+
+ return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
+}
+
+static u32
+intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ u32 max_stride = intel_plane_fb_max_stride(dev_priv,
+ fb->format->format,
+ fb->modifier);
+
+ /*
+ * To make remapping with linear generally feasible
+ * we need the stride to be page aligned.
+ */
+ if (fb->pitches[color_plane] > max_stride)
+ return intel_tile_size(dev_priv);
+ else
+ return 64;
+ } else {
+ return intel_tile_width_bytes(fb, color_plane);
+ }
+}
+
+bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int i;
+
+ /* We don't want to deal with remapping with cursors */
+ if (plane->id == PLANE_CURSOR)
+ return false;
+
+ /*
+ * The display engine limits already match/exceed the
+ * render engine limits, so not much point in remapping.
+ * Would also need to deal with the fence POT alignment
+ * and gen2 2KiB GTT tile size.
+ */
+ if (INTEL_GEN(dev_priv) < 4)
+ return false;
+
+ /*
+ * The new CCS hash mode isn't compatible with remapping as
+ * the virtual address of the pages affects the compressed data.
+ */
+ if (is_ccs_modifier(fb->modifier))
+ return false;
+
+ /* Linear needs a page aligned stride for remapping */
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ unsigned int alignment = intel_tile_size(dev_priv) - 1;
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ if (fb->pitches[i] & alignment)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ u32 stride, max_stride;
+
+ /*
+ * No remapping for invisible planes since we don't have
+ * an actual source viewport to remap.
+ */
+ if (!plane_state->base.visible)
+ return false;
+
+ if (!intel_plane_can_remap(plane_state))
+ return false;
+
+ /*
+ * FIXME: aux plane limits on gen9+ are
+ * unclear in Bspec, for now no checking.
+ */
+ stride = intel_fb_pitch(fb, 0, rotation);
+ max_stride = plane->max_stride(plane, fb->format->format,
+ fb->modifier, rotation);
+
+ return stride > max_stride;
+}
+
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct drm_framebuffer *fb)
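
Among other conditions, intel_plane_can_remap() above requires every plane of a linear framebuffer to have a page-aligned byte stride. The test reduces to a power-of-two alignment check, assuming the 4 KiB tile/page size that intel_tile_size() yields on the gen4+ parts eligible for remapping:

        #include <stdbool.h>
        #include <stdint.h>

        #define GTT_PAGE_SIZE   4096u   /* assumed intel_tile_size() value */

        static bool stride_is_page_aligned(uint32_t pitch_bytes)
        {
                return (pitch_bytes & (GTT_PAGE_SIZE - 1)) == 0;
        }
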
@@ -2641,6 +2795,168 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
return 0;
}
+static void
+intel_plane_remap_gtt(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct intel_rotation_info *info = &plane_state->view.rotated;
+ unsigned int rotation = plane_state->base.rotation;
+ int i, num_planes = fb->format->num_planes;
+ unsigned int tile_size = intel_tile_size(dev_priv);
+ unsigned int src_x, src_y;
+ unsigned int src_w, src_h;
+ u32 gtt_offset = 0;
+
+ memset(&plane_state->view, 0, sizeof(plane_state->view));
+ plane_state->view.type = drm_rotation_90_or_270(rotation) ?
+ I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
+
+ src_x = plane_state->base.src.x1 >> 16;
+ src_y = plane_state->base.src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ src_h = drm_rect_height(&plane_state->base.src) >> 16;
+
+ WARN_ON(is_ccs_modifier(fb->modifier));
+
+ /* Make src coordinates relative to the viewport */
+ drm_rect_translate(&plane_state->base.src,
+ -(src_x << 16), -(src_y << 16));
+
+ /* Rotate src coordinates to match rotated GTT view */
+ if (drm_rotation_90_or_270(rotation))
+ drm_rect_rotate(&plane_state->base.src,
+ src_w << 16, src_h << 16,
+ DRM_MODE_ROTATE_270);
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int hsub = i ? fb->format->hsub : 1;
+ unsigned int vsub = i ? fb->format->vsub : 1;
+ unsigned int cpp = fb->format->cpp[i];
+ unsigned int tile_width, tile_height;
+ unsigned int width, height;
+ unsigned int pitch_tiles;
+ unsigned int x, y;
+ u32 offset;
+
+ intel_tile_dims(fb, i, &tile_width, &tile_height);
+
+ x = src_x / hsub;
+ y = src_y / vsub;
+ width = src_w / hsub;
+ height = src_h / vsub;
+
+ /*
+ * First pixel of the src viewport from the
+ * start of the normal gtt mapping.
+ */
+ x += intel_fb->normal[i].x;
+ y += intel_fb->normal[i].y;
+
+ offset = intel_compute_aligned_offset(dev_priv, &x, &y,
+ fb, i, fb->pitches[i],
+ DRM_MODE_ROTATE_0, tile_size);
+ offset /= tile_size;
+
+ info->plane[i].offset = offset;
+ info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
+ tile_width * cpp);
+ info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
+ info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
+
+ if (drm_rotation_90_or_270(rotation)) {
+ struct drm_rect r;
+
+ /* rotate the x/y offsets to match the GTT view */
+ r.x1 = x;
+ r.y1 = y;
+ r.x2 = x + width;
+ r.y2 = y + height;
+ drm_rect_rotate(&r,
+ info->plane[i].width * tile_width,
+ info->plane[i].height * tile_height,
+ DRM_MODE_ROTATE_270);
+ x = r.x1;
+ y = r.y1;
+
+ pitch_tiles = info->plane[i].height;
+ plane_state->color_plane[i].stride = pitch_tiles * tile_height;
+
+ /* rotate the tile dimensions to match the GTT view */
+ swap(tile_width, tile_height);
+ } else {
+ pitch_tiles = info->plane[i].width;
+ plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
+ }
+
+ /*
+ * We only keep the x/y offsets, so push all of the
+ * gtt offset into the x/y offsets.
+ */
+ intel_adjust_tile_offset(&x, &y,
+ tile_width, tile_height,
+ tile_size, pitch_tiles,
+ gtt_offset * tile_size, 0);
+
+ gtt_offset += info->plane[i].width * info->plane[i].height;
+
+ plane_state->color_plane[i].offset = 0;
+ plane_state->color_plane[i].x = x;
+ plane_state->color_plane[i].y = y;
+ }
+}
+
+static int
+intel_plane_compute_gtt(struct intel_plane_state *plane_state)
+{
+ const struct intel_framebuffer *fb =
+ to_intel_framebuffer(plane_state->base.fb);
+ unsigned int rotation = plane_state->base.rotation;
+ int i, num_planes;
+
+ if (!fb)
+ return 0;
+
+ num_planes = fb->base.format->num_planes;
+
+ if (intel_plane_needs_remap(plane_state)) {
+ intel_plane_remap_gtt(plane_state);
+
+ /*
+ * Sometimes even remapping can't overcome
+ * the stride limitations :( Can happen with
+ * big plane sizes and suitably misaligned
+ * offsets.
+ */
+ return intel_plane_check_stride(plane_state);
+ }
+
+ intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
+
+ for (i = 0; i < num_planes; i++) {
+ plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
+ plane_state->color_plane[i].offset = 0;
+
+ if (drm_rotation_90_or_270(rotation)) {
+ plane_state->color_plane[i].x = fb->rotated[i].x;
+ plane_state->color_plane[i].y = fb->rotated[i].y;
+ } else {
+ plane_state->color_plane[i].x = fb->normal[i].x;
+ plane_state->color_plane[i].y = fb->normal[i].y;
+ }
+ }
+
+ /* Rotate src coordinates to match rotated GTT view */
+ if (drm_rotation_90_or_270(rotation))
+ drm_rect_rotate(&plane_state->base.src,
+ fb->base.width << 16, fb->base.height << 16,
+ DRM_MODE_ROTATE_270);
+
+ return intel_plane_check_stride(plane_state);
+}
+
static int i9xx_format_to_fourcc(int format)
{
switch (format) {
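
The tile bookkeeping in intel_plane_remap_gtt() counts how many whole tiles each remapped plane spans: the width in tiles is DIV_ROUND_UP(x + width, tile_width), so an x offset inside the first tile costs a full tile column. A worked example with assumed X-tile geometry (512 bytes by 8 rows, i.e. 128x8 pixels at 32bpp):

        #include <assert.h>

        #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

        int main(void)
        {
                /* A 3000-pixel-wide viewport starting at x = 100 with
                 * 128-pixel tiles spans 25 tile columns. */
                assert(DIV_ROUND_UP(100 + 3000, 128) == 25);
                return 0;
        }
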
@@ -2968,41 +3284,55 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb,
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
- switch (cpp) {
- case 8:
- return 4096;
- case 4:
- case 2:
- case 1:
- return 8192;
- default:
- MISSING_CASE(cpp);
- break;
- }
- break;
+ return 4096;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
/* FIXME AUX plane? */
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
- switch (cpp) {
- case 8:
+ if (cpp == 8)
return 2048;
- case 4:
+ else
return 4096;
- case 2:
- case 1:
- return 8192;
- default:
- MISSING_CASE(cpp);
- break;
- }
- break;
default:
MISSING_CASE(fb->modifier);
+ return 2048;
}
+}
- return 2048;
+static int glk_max_plane_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ if (cpp == 8)
+ return 4096;
+ else
+ return 5120;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ /* FIXME AUX plane? */
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (cpp == 8)
+ return 2048;
+ else
+ return 5120;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 2048;
+ }
+}
+
+static int icl_max_plane_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 5120;
}
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
@@ -3045,16 +3375,24 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
int x = plane_state->base.src.x1 >> 16;
int y = plane_state->base.src.y1 >> 16;
int w = drm_rect_width(&plane_state->base.src) >> 16;
int h = drm_rect_height(&plane_state->base.src) >> 16;
- int max_width = skl_max_plane_width(fb, 0, rotation);
+ int max_width;
int max_height = 4096;
u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
+ if (INTEL_GEN(dev_priv) >= 11)
+ max_width = icl_max_plane_width(fb, 0, rotation);
+ else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ max_width = glk_max_plane_width(fb, 0, rotation);
+ else
+ max_width = skl_max_plane_width(fb, 0, rotation);
+
if (w > max_width || h > max_height) {
DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
w, h, max_width, max_height);
@@ -3117,6 +3455,14 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
plane_state->color_plane[0].x = x;
plane_state->color_plane[0].y = y;
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate(&plane_state->base.src,
+ (x << 16) - plane_state->base.src.x1,
+ (y << 16) - plane_state->base.src.y1);
+
return 0;
}
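
Both skl_check_main_surface() and, later, i9xx_check_plane_surface() now translate the src rectangle so its origin lands on the final alignment-adjusted x/y; src coordinates are 16.16 fixed point, hence the << 16. In isolation the operation is just a rect translation, sketched here outside the drm_rect API:

        /* Move a 16.16 fixed-point rect so that x1/y1 land on (x, y),
         * mirroring the drm_rect_translate() calls above. */
        struct rect { int x1, y1, x2, y2; };

        static void rect_set_origin_16_16(struct rect *r, int x, int y)
        {
                int dx = (x << 16) - r->x1;
                int dy = (y << 16) - r->y1;

                r->x1 += dx; r->x2 += dx;
                r->y1 += dy; r->y2 += dy;
        }
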
@@ -3173,26 +3519,15 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
int ret;
- intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
- plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
- plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);
-
- ret = intel_plane_check_stride(plane_state);
+ ret = intel_plane_compute_gtt(plane_state);
if (ret)
return ret;
if (!plane_state->base.visible)
return 0;
- /* Rotate src coordinates to match rotated GTT view */
- if (drm_rotation_90_or_270(rotation))
- drm_rect_rotate(&plane_state->base.src,
- fb->width << 16, fb->height << 16,
- DRM_MODE_ROTATE_270);
-
/*
* Handle the AUX surface first since
* the main surface setup depends on it.
@@ -3322,20 +3657,20 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
- int src_x = plane_state->base.src.x1 >> 16;
- int src_y = plane_state->base.src.y1 >> 16;
+ int src_x, src_y;
u32 offset;
int ret;
- intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
- plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
-
- ret = intel_plane_check_stride(plane_state);
+ ret = intel_plane_compute_gtt(plane_state);
if (ret)
return ret;
+ if (!plane_state->base.visible)
+ return 0;
+
+ src_x = plane_state->base.src.x1 >> 16;
+ src_y = plane_state->base.src.y1 >> 16;
+
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
@@ -3344,8 +3679,17 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
else
offset = 0;
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate(&plane_state->base.src,
+ (src_x << 16) - plane_state->base.src.x1,
+ (src_y << 16) - plane_state->base.src.y1);
+
/* HSW/BDW do this automagically in hardware */
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ unsigned int rotation = plane_state->base.rotation;
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
int src_h = drm_rect_height(&plane_state->base.src) >> 16;
@@ -3382,6 +3726,10 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
if (!plane_state->base.visible)
return 0;
@@ -3389,10 +3737,6 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = i9xx_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
return 0;
@@ -3531,15 +3875,6 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
-static u32
-intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
-{
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
- return 64;
- else
- return intel_tile_width_bytes(fb, color_plane);
-}
-
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -5019,6 +5354,21 @@ u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
return ((phase >> 2) & PS_PHASE_MASK) | trip;
}
+#define SKL_MIN_SRC_W 8
+#define SKL_MAX_SRC_W 4096
+#define SKL_MIN_SRC_H 8
+#define SKL_MAX_SRC_H 4096
+#define SKL_MIN_DST_W 8
+#define SKL_MAX_DST_W 4096
+#define SKL_MIN_DST_H 8
+#define SKL_MAX_DST_H 4096
+#define ICL_MAX_SRC_W 5120
+#define ICL_MAX_SRC_H 4096
+#define ICL_MAX_DST_W 5120
+#define ICL_MAX_DST_H 4096
+#define SKL_MIN_YUV_420_SRC_W 16
+#define SKL_MIN_YUV_420_SRC_H 16
+
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
@@ -5298,10 +5648,8 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
if (IS_BROADWELL(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
IPS_ENABLE | IPS_PCODE_CONTROL));
- mutex_unlock(&dev_priv->pcu_lock);
/* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
@@ -5331,9 +5679,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
return;
if (IS_BROADWELL(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
- mutex_unlock(&dev_priv->pcu_lock);
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
@@ -5509,6 +5855,16 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
return false;
}
+static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *crtc_state)
+{
+ /* Wa_2006604312:icl */
+ if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
+ return true;
+
+ return false;
+}
+
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
@@ -5542,11 +5898,13 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
intel_post_enable_primary(&crtc->base, pipe_config);
}
- /* Display WA 827 */
if (needs_nv12_wa(dev_priv, old_crtc_state) &&
- !needs_nv12_wa(dev_priv, pipe_config)) {
+ !needs_nv12_wa(dev_priv, pipe_config))
skl_wa_827(dev_priv, crtc->pipe, false);
- }
+
+ if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
+ !needs_scalerclk_wa(dev_priv, pipe_config))
+ icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
}
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
@@ -5583,9 +5941,13 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
/* Display WA 827 */
if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
- needs_nv12_wa(dev_priv, pipe_config)) {
+ needs_nv12_wa(dev_priv, pipe_config))
skl_wa_827(dev_priv, crtc->pipe, true);
- }
+
+ /* Wa_2006604312:icl */
+ if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
+ needs_scalerclk_wa(dev_priv, pipe_config))
+ icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
/*
* Vblank time updates from the shadow to live plane control register
@@ -5991,7 +6353,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
haswell_set_pipeconf(pipe_config);
- haswell_set_pipemisc(pipe_config);
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipemisc(pipe_config);
intel_crtc->active = true;
@@ -6293,7 +6656,7 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
if (crtc_state->shared_dpll)
- mask |= BIT_ULL(POWER_DOMAIN_PLLS);
+ mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
return mask;
}
@@ -6876,7 +7239,7 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
if (WARN_ON(!pfit_w || !pfit_h))
return pixel_rate;
- pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
+ pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
pfit_w * pfit_h);
}
@@ -6996,7 +7359,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
- *ret_m = div_u64((u64)m * *ret_n, n);
+ *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
intel_reduce_m_n_ratio(ret_m, ret_n);
}
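Editor's note: the div_u64(mul_u32_u32(...)) conversions matter because multiplying two 32-bit values in 32-bit arithmetic truncates the product; either a cast to u64 before the multiply or a widening helper such as mul_u32_u32() keeps the full 64-bit result. A standalone illustration of the truncation the helper avoids (this models the semantics, not the kernel implementation):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace model of the kernel's widening multiply helper. */
static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

int main(void)
{
	uint32_t m = 270000, n = 100000; /* link M/N style magnitudes */

	/* 32-bit multiply wraps: 2.7e10 does not fit in 32 bits. */
	printf("truncated: %" PRIu32 "\n", m * n);
	/* Widening multiply keeps the full product. */
	printf("widened:   %" PRIu64 "\n", mul_u32_u32(m, n));
	return 0;
}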
@@ -7229,7 +7592,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
bestn = pipe_config->dpll.n;
bestm1 = pipe_config->dpll.m1;
@@ -7306,7 +7669,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
- mutex_unlock(&dev_priv->sb_lock);
+
+ vlv_dpio_put(dev_priv);
}
static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -7339,7 +7703,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
dpio_val = 0;
loopfilter = 0;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* p1 and p2 divider */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
@@ -7411,7 +7775,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
DPIO_AFC_RECAL);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
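Editor's note: throughout this series the mechanical mutex_lock(&dev_priv->sb_lock)/unlock pairs become vlv_dpio_get()/vlv_dpio_put(), hiding the sideband lock behind one get/put pair so any extra setup DPIO access needs can later be folded in without churning every call site. A minimal pthread-based model of wrapping a raw lock in get/put helpers (a sketch, not the i915 code; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical device context with a sideband lock. */
struct dev {
	pthread_mutex_t sb_lock;
};

/* get/put wrappers: callers no longer touch sb_lock directly. */
static void dpio_get(struct dev *d) { pthread_mutex_lock(&d->sb_lock); }
static void dpio_put(struct dev *d) { pthread_mutex_unlock(&d->sb_lock); }

int main(void)
{
	struct dev d = { .sb_lock = PTHREAD_MUTEX_INITIALIZER };

	dpio_get(&d);
	puts("registers programmed under the sideband lock");
	dpio_put(&d);
	return 0;
}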
/**
@@ -7679,9 +8043,14 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
tmp = I915_READ(HTOTAL(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
- tmp = I915_READ(HBLANK(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
+
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ tmp = I915_READ(HBLANK(cpu_transcoder));
+ pipe_config->base.adjusted_mode.crtc_hblank_start =
+ (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_hblank_end =
+ ((tmp >> 16) & 0xffff) + 1;
+ }
tmp = I915_READ(HSYNC(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
@@ -7689,9 +8058,14 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
tmp = I915_READ(VTOTAL(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
- tmp = I915_READ(VBLANK(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
+
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ tmp = I915_READ(VBLANK(cpu_transcoder));
+ pipe_config->base.adjusted_mode.crtc_vblank_start =
+ (tmp & 0xffff) + 1;
+ pipe_config->base.adjusted_mode.crtc_vblank_end =
+ ((tmp >> 16) & 0xffff) + 1;
+ }
tmp = I915_READ(VSYNC(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
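Editor's note: each timing register packs two values as zero-based 16-bit fields, one in the low half and one in the high half; readout adds one to each to recover the actual mode values. A standalone decode of that layout (the register value below is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/*
 * Decode a CRTC timing register: low 16 bits and high 16 bits, both
 * stored zero-based, so +1 recovers the real counts.
 */
static void decode_timing(uint32_t reg, int *lo, int *hi)
{
	*lo = (reg & 0xffff) + 1;
	*hi = ((reg >> 16) & 0xffff) + 1;
}

int main(void)
{
	/* HTOTAL-style value: hdisplay 1920, htotal 2200 (illustrative). */
	uint32_t reg = ((2200u - 1) << 16) | (1920u - 1);
	int hdisplay, htotal;

	decode_timing(reg, &hdisplay, &htotal);
	printf("hdisplay=%d htotal=%d\n", hdisplay, htotal); /* 1920 2200 */
	return 0;
}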
@@ -8037,9 +8411,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
clock.m2 = mdiv & DPIO_M2DIV_MASK;
@@ -8148,13 +8522,13 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
clock.m2 = (pll_dw0 & 0xff) << 22;
@@ -8654,7 +9028,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
}
/* Sequence to disable CLKOUT_DP */
-static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
u32 reg, tmp;
@@ -8837,44 +9211,68 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
POSTING_READ(PIPECONF(cpu_transcoder));
}
-static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
+static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 val = 0;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
- u32 val = 0;
+ switch (crtc_state->pipe_bpp) {
+ case 18:
+ val |= PIPEMISC_DITHER_6_BPC;
+ break;
+ case 24:
+ val |= PIPEMISC_DITHER_8_BPC;
+ break;
+ case 30:
+ val |= PIPEMISC_DITHER_10_BPC;
+ break;
+ case 36:
+ val |= PIPEMISC_DITHER_12_BPC;
+ break;
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ break;
+ }
- switch (crtc_state->pipe_bpp) {
- case 18:
- val |= PIPEMISC_DITHER_6_BPC;
- break;
- case 24:
- val |= PIPEMISC_DITHER_8_BPC;
- break;
- case 30:
- val |= PIPEMISC_DITHER_10_BPC;
- break;
- case 36:
- val |= PIPEMISC_DITHER_12_BPC;
- break;
- default:
- /* Case prevented by pipe_config_set_bpp. */
- BUG();
- }
+ if (crtc_state->dither)
+ val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
- if (crtc_state->dither)
- val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ val |= PIPEMISC_YUV420_ENABLE |
+ PIPEMISC_YUV420_MODE_FULL_BLEND;
+
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
+ BIT(PLANE_CURSOR))) == 0)
+ val |= PIPEMISC_HDR_MODE_PRECISION;
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+ I915_WRITE(PIPEMISC(crtc->pipe), val);
+}
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- val |= PIPEMISC_YUV420_ENABLE |
- PIPEMISC_YUV420_MODE_FULL_BLEND;
+int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp;
- I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
+ tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+ switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+ case PIPEMISC_DITHER_6_BPC:
+ return 18;
+ case PIPEMISC_DITHER_8_BPC:
+ return 24;
+ case PIPEMISC_DITHER_10_BPC:
+ return 30;
+ case PIPEMISC_DITHER_12_BPC:
+ return 36;
+ default:
+ MISSING_CASE(tmp);
+ return 0;
}
}
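Editor's note: bdw_get_pipemisc_bpp() is the inverse of the dither-BPC programming in bdw_set_pipemisc(): four pipe_bpp values map onto a small register field and back. A self-contained round trip of that mapping (the field encodings below are illustrative stand-ins for the PIPEMISC_DITHER_*_BPC bits, not the real register values):

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the PIPEMISC dither BPC field values. */
enum bpc_field { BPC_6, BPC_8, BPC_10, BPC_12 };

static int field_to_bpp(enum bpc_field f)
{
	switch (f) {
	case BPC_6:  return 18;
	case BPC_8:  return 24;
	case BPC_10: return 30;
	case BPC_12: return 36;
	}
	return 0;
}

static enum bpc_field bpp_to_field(int bpp)
{
	switch (bpp) {
	case 18: return BPC_6;
	case 24: return BPC_8;
	case 30: return BPC_10;
	default: return BPC_12; /* 36 */
	}
}

int main(void)
{
	int bpps[] = { 18, 24, 30, 36 };

	for (int i = 0; i < 4; i++)
		assert(field_to_bpp(bpp_to_field(bpps[i])) == bpps[i]);
	puts("round trip ok");
	return 0;
}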
@@ -9409,228 +9807,6 @@ out:
return ret;
}
-
-static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(dev, crtc)
- I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
- pipe_name(crtc->pipe));
-
- I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
- "Display power well on\n");
- I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
- I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
- "CPU PWM1 enabled\n");
- if (IS_HASWELL(dev_priv))
- I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
- "CPU PWM2 enabled\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
- "PCH PWM1 enabled\n");
- I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Utility pin enabled\n");
- I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
-
- /*
- * In theory we can still leave IRQs enabled, as long as only the HPD
- * interrupts remain enabled. We used to check for that, but since it's
- * gen-specific and since we only disable LCPLL after we fully disable
- * the interrupts, the check below should be enough.
- */
- I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
-}
-
-static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv))
- return I915_READ(D_COMP_HSW);
- else
- return I915_READ(D_COMP_BDW);
-}
-
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
-{
- if (IS_HASWELL(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
- if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
- val))
- DRM_DEBUG_KMS("Failed to write to D_COMP\n");
- mutex_unlock(&dev_priv->pcu_lock);
- } else {
- I915_WRITE(D_COMP_BDW, val);
- POSTING_READ(D_COMP_BDW);
- }
-}
-
-/*
- * This function implements pieces of two sequences from BSpec:
- * - Sequence for display software to disable LCPLL
- * - Sequence for display software to allow package C8+
- * The steps implemented here are just the steps that actually touch the LCPLL
- * register. Callers should take care of disabling all the display engine
- * functions, doing the mode unset, fixing interrupts, etc.
- */
-static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
- bool switch_to_fclk, bool allow_power_down)
-{
- u32 val;
-
- assert_can_disable_lcpll(dev_priv);
-
- val = I915_READ(LCPLL_CTL);
-
- if (switch_to_fclk) {
- val |= LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
-
- if (wait_for_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
- DRM_ERROR("Switching to FCLK failed\n");
-
- val = I915_READ(LCPLL_CTL);
- }
-
- val |= LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
-
- if (intel_wait_for_register(&dev_priv->uncore,
- LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
- DRM_ERROR("LCPLL still locked\n");
-
- val = hsw_read_dcomp(dev_priv);
- val |= D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
- ndelay(100);
-
- if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
- 1))
- DRM_ERROR("D_COMP RCOMP still in progress\n");
-
- if (allow_power_down) {
- val = I915_READ(LCPLL_CTL);
- val |= LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
- }
-}
-
-/*
- * Fully restores LCPLL, disallowing power down and switching back to LCPLL
- * source.
- */
-static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(LCPLL_CTL);
-
- if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
- LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
- return;
-
- /*
- * Make sure we're not in PC8 state before disabling PC8, otherwise

- * we'll hang the machine. To prevent PC8 state, just enable force_wake.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- if (val & LCPLL_POWER_DOWN_ALLOW) {
- val &= ~LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
- }
-
- val = hsw_read_dcomp(dev_priv);
- val |= D_COMP_COMP_FORCE;
- val &= ~D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
-
- val = I915_READ(LCPLL_CTL);
- val &= ~LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
-
- if (intel_wait_for_register(&dev_priv->uncore,
- LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
- 5))
- DRM_ERROR("LCPLL not locked yet\n");
-
- if (val & LCPLL_CD_SOURCE_FCLK) {
- val = I915_READ(LCPLL_CTL);
- val &= ~LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
-
- if (wait_for_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- DRM_ERROR("Switching back to LCPLL failed\n");
- }
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-}
-
-/*
- * Package states C8 and deeper are really deep PC states that can only be
- * reached when all the devices on the system allow it, so even if the graphics
- * device allows PC8+, it doesn't mean the system will actually get to these
- * states. Our driver only allows PC8+ when going into runtime PM.
- *
- * The requirements for PC8+ are that all the outputs are disabled, the power
- * well is disabled and most interrupts are disabled, and these are also
- * requirements for runtime PM. When these conditions are met, we manually do
- * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
- * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
- * hang the machine.
- *
- * When we really reach PC8 or deeper states (not just when we allow it) we lose
- * the state of some registers, so when we come back from PC8+ we need to
- * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
- * need to take care of the registers kept by RC6. Notice that this happens even
- * if we don't put the device in PCI D3 state (which is what currently happens
- * because of the runtime PM support).
- *
- * For more, read "Display Sequences for Package C8" in the hardware
- * documentation.
- */
-void hsw_enable_pc8(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- DRM_DEBUG_KMS("Enabling package C8+\n");
-
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
- }
-
- lpt_disable_clkout_dp(dev_priv);
- hsw_disable_lcpll(dev_priv, true, true);
-}
-
-void hsw_disable_pc8(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- DRM_DEBUG_KMS("Disabling package C8+\n");
-
- hsw_restore_lcpll(dev_priv);
- lpt_init_pch_refclk(dev_priv);
-
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
- val |= PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
- }
-}
-
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -9801,6 +9977,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
for_each_set_bit(panel_transcoder,
&panel_transcoder_mask,
ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
+ bool force_thru = false;
enum pipe trans_pipe;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
@@ -9822,6 +9999,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ force_thru = true;
+ /* fall through */
case TRANS_DDI_EDP_INPUT_A_ON:
trans_pipe = PIPE_A;
break;
@@ -9833,8 +10012,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
break;
}
- if (trans_pipe == crtc->pipe)
+ if (trans_pipe == crtc->pipe) {
pipe_config->cpu_transcoder = panel_transcoder;
+ pipe_config->pch_pfit.force_thru = force_thru;
+ }
}
/*
@@ -10119,19 +10300,17 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
int src_x, src_y;
u32 offset;
int ret;
- intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
- plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
-
- ret = intel_plane_check_stride(plane_state);
+ ret = intel_plane_compute_gtt(plane_state);
if (ret)
return ret;
+ if (!plane_state->base.visible)
+ return 0;
+
src_x = plane_state->base.src_x >> 16;
src_y = plane_state->base.src_y >> 16;
@@ -10168,6 +10347,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = intel_cursor_check_surface(plane_state);
+ if (ret)
+ return ret;
+
if (!plane_state->base.visible)
return 0;
@@ -10175,10 +10358,6 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = intel_cursor_check_surface(plane_state);
- if (ret)
- return ret;
-
return 0;
}
@@ -11669,10 +11848,11 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->gmch_pfit.pgm_ratios,
pipe_config->gmch_pfit.lvds_border_bits);
else
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
pipe_config->pch_pfit.pos,
pipe_config->pch_pfit.size,
- enableddisabled(pipe_config->pch_pfit.enabled));
+ enableddisabled(pipe_config->pch_pfit.enabled),
+ yesno(pipe_config->pch_pfit.force_thru));
DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
pipe_config->ips_enabled, pipe_config->double_wide);
@@ -11794,7 +11974,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
- saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
saved_state->crc_enabled = crtc_state->crc_enabled;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -12086,7 +12265,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config,
bool adjust)
{
- struct intel_crtc *crtc = to_intel_crtc(current_config->base.crtc);
bool ret = true;
bool fixup_inherited = adjust &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
@@ -12312,9 +12490,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
* Changing the EDP transcoder input mux
* (A_ONOFF vs. A_ON) requires a full modeset.
*/
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
- current_config->cpu_transcoder == TRANSCODER_EDP)
- PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
+ PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
if (!adjust) {
PIPE_CONF_CHECK_I(pipe_src_w);
@@ -14109,6 +14285,9 @@ static void intel_begin_crtc_commit(struct intel_atomic_state *state,
else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(new_crtc_state);
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipemisc(new_crtc_state);
+
out:
if (dev_priv->display.atomic_update_watermarks)
dev_priv->display.atomic_update_watermarks(state,
@@ -14609,9 +14788,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = -ENOMEM;
goto fail;
}
+ __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
intel_crtc->config = crtc_state;
- intel_crtc->base.state = &crtc_state->base;
- crtc_state->base.crtc = &intel_crtc->base;
primary = intel_primary_plane_create(dev_priv, pipe);
if (IS_ERR(primary)) {
@@ -15064,31 +15242,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.dirty = intel_user_framebuffer_dirty,
};
-static
-u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
- u32 pixel_format, u64 fb_modifier)
-{
- struct intel_crtc *crtc;
- struct intel_plane *plane;
-
- /*
- * We assume the primary plane for pipe A has
- * the highest stride limits of them all.
- */
- crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
- plane = to_intel_plane(crtc->base.primary);
-
- return plane->max_stride(plane, pixel_format, fb_modifier,
- DRM_MODE_ROTATE_0);
-}
-
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_framebuffer *fb = &intel_fb->base;
- u32 pitch_limit;
+ u32 max_stride;
unsigned int tiling, stride;
int ret = -EINVAL;
int i;
@@ -15140,13 +15300,13 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
- if (mode_cmd->pitches[0] > pitch_limit) {
+ max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
+ if (mode_cmd->pitches[0] > max_stride) {
DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
- mode_cmd->pitches[0], pitch_limit);
+ mode_cmd->pitches[0], max_stride);
goto err;
}
@@ -15423,6 +15583,16 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.update_crtcs = intel_update_crtcs;
}
+static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
+{
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return VLV_VGACNTRL;
+ else if (INTEL_GEN(dev_priv) >= 5)
+ return CPU_VGACNTRL;
+ else
+ return VGACNTRL;
+}
+
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
@@ -15660,16 +15830,22 @@ int intel_modeset_init(struct drm_device *dev)
}
}
- /* maximum framebuffer dimensions */
- if (IS_GEN(dev_priv, 2)) {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ /*
+ * Maximum framebuffer dimensions, chosen to match
+ * the maximum render engine surface size on gen4+.
+ */
+ if (INTEL_GEN(dev_priv) >= 7) {
+ dev->mode_config.max_width = 16384;
+ dev->mode_config.max_height = 16384;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
} else if (IS_GEN(dev_priv, 3)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
}
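Editor's note: the reordered ladder now checks the newest generations first, which reads more naturally than the old gen2-first ordering. The same selection as a pure function with a quick check (the IS_GEN-style predicates are replaced by a plain integer gen here, an assumption for illustration):

#include <stdio.h>

/* Max framebuffer dimension by display generation, per the ladder above. */
static int max_fb_dim(int gen)
{
	if (gen >= 7)
		return 16384;
	if (gen >= 4)
		return 8192;
	if (gen == 3)
		return 4096;
	return 2048; /* gen2 */
}

int main(void)
{
	for (int gen = 2; gen <= 11; gen++)
		printf("gen%d: %d\n", gen, max_fb_dim(gen));
	return 0;
}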
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
@@ -16153,7 +16329,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
- crtc_state->base.crtc = &crtc->base;
+ __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
crtc_state->base.active = crtc_state->base.enable =
dev_priv->display.get_pipe_config(crtc, crtc_state);
@@ -16568,7 +16744,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_overlay_cleanup(dev_priv);
- intel_teardown_gmbus(dev_priv);
+ intel_gmbus_teardown(dev_priv);
destroy_workqueue(dev_priv->modeset_wq);
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 2220588e86ac..a43d54089be3 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -28,6 +28,9 @@
#include <drm/drm_util.h>
#include <drm/i915_drm.h>
+struct drm_i915_private;
+struct intel_plane_state;
+
enum i915_gpio {
GPIOA,
GPIOB,
@@ -218,6 +221,7 @@ enum aux_ch {
#define aux_ch_name(a) ((a) + 'A')
enum intel_display_power_domain {
+ POWER_DOMAIN_DISPLAY_CORE,
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
POWER_DOMAIN_PIPE_C,
@@ -248,7 +252,6 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_OTHER,
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
- POWER_DOMAIN_PLLS,
POWER_DOMAIN_AUX_A,
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
@@ -432,4 +435,9 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
struct intel_link_m_n *m_n,
bool constant_n);
bool is_ccs_modifier(u64 modifier);
+void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier);
+bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 560274d1c50b..24b56b2a76c8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -31,6 +31,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>
+
#include <asm/byteorder.h>
#include <drm/drm_atomic_helper.h>
@@ -41,18 +42,27 @@
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>
+#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
+#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
+#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_sideband.h"
+#include "intel_vdsc.h"
#define DP_DPRX_ESI_LEN 14
@@ -206,14 +216,17 @@ static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ intel_wakeref_t wakeref;
u32 lane_info;
if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
return 4;
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+ lane_info = 0;
+ with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+ DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+ DP_LANE_ASSIGNMENT_SHIFT(tc_port);
switch (lane_info) {
default:
@@ -1211,7 +1224,10 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
to_i915(intel_dig_port->base.base.dev);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
- intel_wakeref_t wakeref;
+ enum intel_display_power_domain aux_domain =
+ intel_aux_power_domain(intel_dig_port);
+ intel_wakeref_t aux_wakeref;
+ intel_wakeref_t pps_wakeref;
int i, ret, recv_bytes;
int try, clock = 0;
u32 status;
@@ -1221,7 +1237,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
- wakeref = pps_lock(intel_dp);
+ aux_wakeref = intel_display_power_get(dev_priv, aux_domain);
+ pps_wakeref = pps_lock(intel_dp);
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
@@ -1367,7 +1384,8 @@ out:
if (vdd)
edp_panel_vdd_off(intel_dp, false);
- pps_unlock(intel_dp, wakeref);
+ pps_unlock(intel_dp, pps_wakeref);
+ intel_display_power_put_async(dev_priv, aux_domain, aux_wakeref);
return ret;
}
@@ -1832,6 +1850,19 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
}
}
+static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
+{
+ /*
+ * The bpp value was assumed to be for the RGB format. For YCbCr 4:2:0
+ * output, the effective number of bits per pixel is half that of
+ * RGB.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ bpp /= 2;
+
+ return bpp;
+}
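Editor's note: because 4:2:0 subsampling carries one chroma sample pair per 2x2 pixel block, the effective bits per pixel on the link is half the RGB value, which halves the payload the M/N computation has to budget for. A quick numeric check (the clock * bpp / 8 scaling is a rough payload-rate illustration, not the exact i915 formula):

#include <stdio.h>

/* Effective link bpp: YCbCr 4:2:0 halves the RGB value. */
static int output_bpp(int pipe_bpp, int is_ycbcr420)
{
	return is_ycbcr420 ? pipe_bpp / 2 : pipe_bpp;
}

int main(void)
{
	int pixel_clock = 594000; /* kHz, e.g. a 4K60 mode (illustrative) */
	int bpp_rgb = output_bpp(24, 0);
	int bpp_420 = output_bpp(24, 1);

	/* Rough payload rate: clock * bpp / 8 (kB/s). */
	printf("RGB:   %d kB/s\n", pixel_clock * bpp_rgb / 8);
	printf("4:2:0: %d kB/s\n", pixel_clock * bpp_420 / 8);
	return 0;
}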
+
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
@@ -2075,6 +2106,36 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+static int
+intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
+ struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ int ret;
+
+ if (!drm_mode_is_420_only(info, adjusted_mode) ||
+ !intel_dp_get_colorimetry_status(intel_dp) ||
+ !connector->ycbcr_420_allowed)
+ return 0;
+
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+
+ /* YCBCR 420 output conversion needs a scaler */
+ ret = skl_update_scaler_crtc(crtc_state);
+ if (ret) {
+ DRM_DEBUG_KMS("Scaler allocation for output failed\n");
+ return ret;
+ }
+
+ intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
+
+ return 0;
+}
+
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -2114,7 +2175,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
to_intel_digital_connector_state(conn_state);
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
- int ret, output_bpp;
+ int ret = 0, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -2122,6 +2183,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (lspcon->active)
lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
+ else
+ ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
+ pipe_config);
+
+ if (ret)
+ return ret;
pipe_config->has_drrs = false;
if (IS_G4X(dev_priv) || port == PORT_A)
@@ -2169,7 +2236,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (pipe_config->dsc_params.compression_enable)
output_bpp = pipe_config->dsc_params.compressed_bpp;
else
- output_bpp = pipe_config->pipe_bpp;
+ output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
@@ -3148,12 +3215,12 @@ static void chv_post_disable_dp(struct intel_encoder *encoder,
intel_dp_link_down(encoder, old_crtc_state);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
static void
@@ -4041,6 +4108,16 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
+bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+{
+ u8 dprx = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dprx) != 1)
+ return false;
+ return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
+}
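Editor's note: intel_dp_get_colorimetry_status() follows the usual DPCD probe shape: read one capability byte over AUX, fail closed if the read comes up short, then test a feature bit. A minimal model of that pattern with a fake read function (read_dpcd_byte and the register/bit values are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_LIST_REG 0x2210     /* hypothetical DPCD address */
#define VSC_EXT_COLORIMETRY (1 << 3) /* hypothetical feature bit */

/* Fake AUX read: returns the number of bytes read (1 on success). */
static int read_dpcd_byte(uint32_t reg, uint8_t *val)
{
	(void)reg;
	*val = VSC_EXT_COLORIMETRY; /* pretend the sink supports it */
	return 1;
}

static bool get_colorimetry_status(void)
{
	uint8_t dprx = 0;

	/* Fail closed: a short read means "not supported". */
	if (read_dpcd_byte(FEATURE_LIST_REG, &dprx) != 1)
		return false;
	return dprx & VSC_EXT_COLORIMETRY;
}

int main(void)
{
	printf("colorimetry via VSC SDP: %s\n",
	       get_colorimetry_status() ? "yes" : "no");
	return 0;
}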
+
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
/*
@@ -4351,6 +4428,96 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
+static void
+intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct dp_sdp vsc_sdp = {};
+
+ /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
+ vsc_sdp.sdp_header.HB0 = 0;
+ vsc_sdp.sdp_header.HB1 = 0x7;
+
+ /*
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication.
+ */
+ vsc_sdp.sdp_header.HB2 = 0x5;
+
+ /*
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication (HB2 = 05h); HB3 gives the number
+ * of valid data bytes.
+ */
+ vsc_sdp.sdp_header.HB3 = 0x13;
+
+ /*
+ * DB16[7:4]: pixel encoding, YCbCr 4:2:0 = 3h.
+ * DB16[3:0]: colorimetry, ITU-R BT.601 = 0h, ITU-R BT.709 = 1h.
+ * See DP 1.4a spec, Table 2-120.
+ */
+ vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4, YCbCr 4:2:0 */
+ /* RGB->YCBCR color conversion uses the BT.709 color space. */
+ vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+
+ /*
+ * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
+ * the following Component Bit Depth values are defined:
+ * 001b = 8bpc.
+ * 010b = 10bpc.
+ * 011b = 12bpc.
+ * 100b = 16bpc.
+ */
+ switch (crtc_state->pipe_bpp) {
+ case 24: /* 8bpc */
+ vsc_sdp.db[17] = 0x1;
+ break;
+ case 30: /* 10bpc */
+ vsc_sdp.db[17] = 0x2;
+ break;
+ case 36: /* 12bpc */
+ vsc_sdp.db[17] = 0x3;
+ break;
+ case 48: /* 16bpc */
+ vsc_sdp.db[17] = 0x4;
+ break;
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ break;
+ }
+
+ /*
+ * Dynamic Range (Bit 7)
+ * 0 = VESA range, 1 = CTA range.
+ * All YCbCr formats are always limited range.
+ */
+ vsc_sdp.db[17] |= 0x80;
+
+ /*
+ * Content Type (Bits 2:0)
+ * 000b = Not defined.
+ * 001b = Graphics.
+ * 010b = Photo.
+ * 011b = Video.
+ * 100b = Game
+ * All other values are RESERVED.
+ * Note: See CTA-861-G for the definition and expected
+ * processing by a stream sink for the above content types.
+ */
+ vsc_sdp.db[18] = 0;
+
+ intel_dig_port->write_infoframe(&intel_dig_port->base,
+ crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
+}
+
+void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+ return;
+
+ intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
+}
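Editor's note: the interesting payload bytes above are DB16 (pixel encoding in bits [7:4], colorimetry in [3:0]) and DB17 (dynamic range in bit 7, component bit depth in the low bits). A standalone packer for just those two bytes, using the same values the function programs (constants copied from the comments; treat this as a sketch, not a spec extract):

#include <stdint.h>
#include <stdio.h>

/* DB16: pixel encoding [7:4], colorimetry [3:0] (DP 1.4a Table 2-120). */
static uint8_t pack_db16(uint8_t encoding, uint8_t colorimetry)
{
	return (uint8_t)((encoding << 4) | (colorimetry & 0xf));
}

/* DB17: bit 7 = CTA (limited) range, low bits = bit depth code. */
static uint8_t pack_db17(uint8_t depth_code, int limited_range)
{
	return (uint8_t)((limited_range ? 0x80 : 0) | depth_code);
}

int main(void)
{
	/* YCbCr 4:2:0 (0x3), BT.709 (0x1), 8bpc (0x1), limited range. */
	uint8_t db16 = pack_db16(0x3, 0x1);
	uint8_t db17 = pack_db17(0x1, 1);

	printf("DB16=0x%02x DB17=0x%02x\n", db16, db17); /* 0x31 0x81 */
	return 0;
}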
+
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
int status = 0;
@@ -4829,15 +4996,15 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
u8 *dpcd = intel_dp->dpcd;
u8 type;
+ if (WARN_ON(intel_dp_is_edp(intel_dp)))
+ return connector_status_connected;
+
if (lspcon->active)
lspcon_resume(lspcon);
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
- if (intel_dp_is_edp(intel_dp))
- return connector_status_connected;
-
/* if there's no downstream port, we're done */
if (!drm_dp_is_branch(dpcd))
return connector_status_connected;
@@ -5219,12 +5386,15 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
u32 dpsp;
/*
- * WARN if we got a legacy port HPD, but VBT didn't mark the port as
+ * Complain if we got a legacy port HPD, but VBT didn't mark the port as
* legacy. Treat the port as legacy from now on.
*/
- if (WARN_ON(!intel_dig_port->tc_legacy_port &&
- I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
+ if (!intel_dig_port->tc_legacy_port &&
+ I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
+ DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
+ port_name(port));
intel_dig_port->tc_legacy_port = true;
+ }
is_legacy = intel_dig_port->tc_legacy_port;
/*
@@ -5276,7 +5446,7 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
*
* Return %true if port is connected, %false otherwise.
*/
-bool intel_digital_port_connected(struct intel_encoder *encoder)
+static bool __intel_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -5306,6 +5476,18 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
return false;
}
+bool intel_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ bool is_connected = false;
+ intel_wakeref_t wakeref;
+
+ with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ is_connected = __intel_digital_port_connected(encoder);
+
+ return is_connected;
+}
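Editor's note: with_intel_display_power() is a scoped get/put: the body runs with a wakeref held and the reference is dropped when the scope exits, which is why the wrapper above can probe the hardware in a single statement. One common way to build such a construct is a one-iteration for loop, sketched here in userspace terms (this models the shape of the macro, not its i915 definition):

#include <stdio.h>

static int power_get(void)   { puts("power: get"); return 1; }
static void power_put(int w) { (void)w; puts("power: put"); }

/*
 * One-iteration for loop: the initializer takes the reference, the
 * body runs once, and the increment clause releases it on the way out.
 */
#define with_power(wakeref) \
	for ((wakeref) = power_get(); (wakeref); \
	     power_put(wakeref), (wakeref) = 0)

int main(void)
{
	int wakeref;
	int connected = 0;

	with_power(wakeref)
		connected = 1; /* probe hardware while powered */

	printf("connected=%d\n", connected);
	return 0;
}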
+
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
@@ -5359,16 +5541,11 @@ intel_dp_detect(struct drm_connector *connector,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
- enum intel_display_power_domain aux_domain =
- intel_aux_power_domain(dig_port);
- intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
- wakeref = intel_display_power_get(dev_priv, aux_domain);
-
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
@@ -5432,10 +5609,8 @@ intel_dp_detect(struct drm_connector *connector,
int ret;
ret = intel_dp_retrain_link(encoder, ctx);
- if (ret) {
- intel_display_power_put(dev_priv, aux_domain, wakeref);
+ if (ret)
return ret;
- }
}
/*
@@ -5457,7 +5632,6 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(dev_priv, aux_domain, wakeref);
return status;
}
@@ -6235,6 +6409,10 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_dp->reset_link_params = true;
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !intel_dp_is_edp(intel_dp))
+ return;
+
with_pps_lock(intel_dp, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
@@ -6278,9 +6456,6 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum irqreturn ret = IRQ_NONE;
- intel_wakeref_t wakeref;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
@@ -6303,9 +6478,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
return IRQ_NONE;
}
- wakeref = intel_display_power_get(dev_priv,
- intel_aux_power_domain(intel_dig_port));
-
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
/*
@@ -6317,7 +6489,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
- goto put_power;
+
+ return IRQ_NONE;
}
}
@@ -6327,17 +6500,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
handled = intel_dp_short_pulse(intel_dp);
if (!handled)
- goto put_power;
+ return IRQ_NONE;
}
- ret = IRQ_HANDLED;
-
-put_power:
- intel_display_power_put(dev_priv,
- intel_aux_power_domain(intel_dig_port),
- wakeref);
-
- return ret;
+ return IRQ_HANDLED;
}
/* check the VBT to see whether the eDP is on another port */
@@ -7190,10 +7356,16 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_port_edp(dev_priv, port))
+ if (intel_dp_is_port_edp(dev_priv, port)) {
+ /*
+ * Currently we don't support eDP on TypeC ports, although in
+ * theory it could work on TypeC legacy ports.
+ */
+ WARN_ON(intel_port_is_tc(dev_priv, port));
type = DRM_MODE_CONNECTOR_eDP;
- else
+ } else {
type = DRM_MODE_CONNECTOR_DisplayPort;
+ }
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
@@ -7223,6 +7395,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
+ if (INTEL_GEN(dev_priv) >= 11)
+ connector->ycbcr_420_allowed = true;
+
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
intel_dp_aux_init(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
index 5e9e8d13de6e..da70b1a41c83 100644
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -108,6 +108,7 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
int mode_hdisplay);
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct intel_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 357136f17f85..7ded95a334db 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -22,6 +22,7 @@
*
*/
+#include "intel_dp_aux_backlight.h"
#include "intel_drv.h"
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.h b/drivers/gpu/drm/i915/intel_dp_aux_backlight.h
new file mode 100644
index 000000000000..ed60c2858967
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_AUX_BACKLIGHT_H__
+#define __INTEL_DP_AUX_BACKLIGHT_H__
+
+struct intel_connector;
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
+
+#endif /* __INTEL_DP_AUX_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 54b069333e2f..9b1fccea966b 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -22,6 +22,7 @@
*/
#include "intel_dp.h"
+#include "intel_dp_link_training.h"
#include "intel_drv.h"
static void
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.h b/drivers/gpu/drm/i915/intel_dp_link_training.h
new file mode 100644
index 000000000000..174566adcc92
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_LINK_TRAINING_H__
+#define __INTEL_DP_LINK_TRAINING_H__
+
+struct intel_dp;
+
+void intel_dp_start_link_train(struct intel_dp *intel_dp);
+void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_DP_LINK_TRAINING_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 8839eaea8371..60652ebbdf61 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -28,10 +28,13 @@
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
+#include "intel_dp_mst.h"
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
@@ -148,9 +151,10 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
static int
intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *new_conn_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = new_conn_state->state;
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(state, connector);
struct intel_connector *intel_connector =
@@ -160,7 +164,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr;
int ret;
- ret = intel_digital_connector_atomic_check(connector, new_conn_state);
+ ret = intel_digital_connector_atomic_check(connector, state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.h b/drivers/gpu/drm/i915/intel_dp_mst.h
new file mode 100644
index 000000000000..1470c6e0514b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_mst.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_MST_H__
+#define __INTEL_DP_MST_H__
+
+struct intel_digital_port;
+
+int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
+
+#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index ab4ac7158b79..bdbe41759827 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -22,7 +22,9 @@
*/
#include "intel_dp.h"
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
+#include "intel_sideband.h"
/**
* DOC: DPIO
@@ -648,7 +650,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 val;
int i;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
@@ -729,8 +731,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
}
- mutex_unlock(&dev_priv->sb_lock);
-
+ vlv_dpio_put(dev_priv);
}
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
@@ -800,7 +801,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_powergate_lanes(encoder, true, lane_mask);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, crtc_state, true);
@@ -855,7 +856,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
@@ -870,7 +871,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
int data, i, stagger;
u32 val;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
@@ -935,7 +936,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
/* Deassert data lane reset */
chv_data_lane_soft_reset(encoder, crtc_state, false);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
@@ -956,7 +957,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
u32 val;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
@@ -969,7 +970,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
/*
* Leave the power down bit cleared for at least one
@@ -993,7 +994,8 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
+
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
@@ -1006,7 +1008,8 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
- mutex_unlock(&dev_priv->sb_lock);
+
+ vlv_dpio_put(dev_priv);
}
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
@@ -1019,7 +1022,8 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
enum pipe pipe = crtc->pipe;
/* Program Tx lane resets to default */
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
+
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
@@ -1033,7 +1037,8 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
- mutex_unlock(&dev_priv->sb_lock);
+
+ vlv_dpio_put(dev_priv);
}
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
@@ -1047,7 +1052,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
enum pipe pipe = crtc->pipe;
u32 val;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Enable clock channels for this port */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
@@ -1063,7 +1068,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
@@ -1075,8 +1080,8 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.h b/drivers/gpu/drm/i915/intel_dpio_phy.h
new file mode 100644
index 000000000000..f418aab90b7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DPIO_PHY_H__
+#define __INTEL_DPIO_PHY_H__
+
+#include <linux/types.h>
+
+enum dpio_channel;
+enum dpio_phy;
+enum port;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch);
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+ enum port port, u32 margin, u32 scale,
+ u32 enable, u32 deemphasis);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask);
+u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
+
+#endif /* __INTEL_DPIO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index e01c057ce50b..897d93537414 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -21,6 +21,8 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_dpio_phy.h"
+#include "intel_dpll_mgr.h"
#include "intel_drv.h"
/**
@@ -349,7 +351,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -358,7 +360,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->fp0 = I915_READ(PCH_FP0(id));
hw_state->fp1 = I915_READ(PCH_FP1(id));
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & DPLL_VCO_ENABLE;
}
@@ -517,14 +519,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
val = I915_READ(WRPLL_CTL(id));
hw_state->wrpll = val;
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & WRPLL_PLL_ENABLE;
}
@@ -537,14 +539,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return val & SPLL_PLL_ENABLE;
}
@@ -1002,7 +1004,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
bool ret;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -1023,7 +1025,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -1039,7 +1041,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
bool ret;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -1056,7 +1058,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -1600,7 +1602,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -1658,7 +1660,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -1879,27 +1881,6 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.get_hw_state = bxt_ddi_pll_get_hw_state,
};
-static void intel_ddi_pll_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (INTEL_GEN(dev_priv) < 9) {
- u32 val = I915_READ(LCPLL_CTL);
-
- /*
- * The LCPLL register should be turned on by the BIOS. For now
- * let's just check its state and print errors in case
- * something is wrong. Don't even try to turn it on.
- */
-
- if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
-
- if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
- }
-}
-
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
@@ -2106,7 +2087,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
bool ret;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -2126,7 +2107,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -2741,11 +2722,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
}
if (use_ssc) {
- tmp = (u64)dco_khz * 47 * 32;
+ tmp = mul_u32_u32(dco_khz, 47 * 32);
do_div(tmp, refclk_khz * m1div * 10000);
ssc_stepsize = tmp;
- tmp = (u64)dco_khz * 1000;
+ tmp = mul_u32_u32(dco_khz, 1000);
ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
} else {
ssc_stepsize = 0;
@@ -2881,7 +2862,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -2928,7 +2909,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
@@ -2943,7 +2924,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
u32 val;
wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_PLLS);
+ POWER_DOMAIN_DISPLAY_CORE);
if (!wakeref)
return false;
@@ -2956,7 +2937,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return ret;
}
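
[Every ->get_hw_state() hook touched in this file follows the same wakeref-tracked pattern, now against POWER_DOMAIN_DISPLAY_CORE instead of POWER_DOMAIN_PLLS. A condensed sketch of that pattern; read_pll_enabled() is a hypothetical stand-in for the per-platform register read:

    static bool pll_get_hw_state_sketch(struct drm_i915_private *dev_priv)
    {
        intel_wakeref_t wakeref;
        bool ret;

        /* Returns 0 if the domain is powered down; don't touch the HW then. */
        wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                     POWER_DOMAIN_DISPLAY_CORE);
        if (!wakeref)
            return false;

        ret = read_pll_enabled(dev_priv);   /* hypothetical register read */

        /* The wakeref cookie pairs this put with its get for leak tracking. */
        intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

        return ret;
    }
]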
@@ -3303,10 +3284,6 @@ void intel_shared_dpll_init(struct drm_device *dev)
mutex_init(&dev_priv->dpll_lock);
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
-
- /* FIXME: Move this to a more suitable place */
- if (HAS_DDI(dev_priv))
- intel_ddi_pll_init(dev);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index bd8124cc81ed..8835dd20f1d2 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -25,6 +25,10 @@
#ifndef _INTEL_DPLL_MGR_H_
#define _INTEL_DPLL_MGR_H_
+#include <linux/types.h>
+
+#include "intel_display.h"
+
/* FIXME: Move this to a more appropriate place. */
#define abs_diff(a, b) ({ \
typeof(a) __a = (a); \
@@ -32,13 +36,13 @@
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
+struct drm_atomic_state;
+struct drm_device;
struct drm_i915_private;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
-
struct intel_shared_dpll;
-struct intel_dpll_mgr;
/**
* enum intel_dpll_id - possible DPLL ids
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e85cd377a652..b691341df854 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -28,7 +28,6 @@
#include <linux/async.h>
#include <linux/i2c.h>
#include <linux/sched/clock.h>
-#include <linux/stackdepot.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
@@ -47,127 +46,10 @@
struct drm_printer;
-/**
- * __wait_for - magic wait macro
- *
- * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
- * important that we check the condition again after having timed out, since the
- * timeout could be due to preemption or similar and we've never had a chance to
- * check the condition before the timeout.
- */
-#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
- const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
- long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
- int ret__; \
- might_sleep(); \
- for (;;) { \
- const bool expired__ = ktime_after(ktime_get_raw(), end__); \
- OP; \
- /* Guarantee COND check prior to timeout */ \
- barrier(); \
- if (COND) { \
- ret__ = 0; \
- break; \
- } \
- if (expired__) { \
- ret__ = -ETIMEDOUT; \
- break; \
- } \
- usleep_range(wait__, wait__ * 2); \
- if (wait__ < (Wmax)) \
- wait__ <<= 1; \
- } \
- ret__; \
-})
-
-#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
- (Wmax))
-#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
-
-/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
-#else
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
-#endif
-
-#define _wait_for_atomic(COND, US, ATOMIC) \
-({ \
- int cpu, ret, timeout = (US) * 1000; \
- u64 base; \
- _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
- if (!(ATOMIC)) { \
- preempt_disable(); \
- cpu = smp_processor_id(); \
- } \
- base = local_clock(); \
- for (;;) { \
- u64 now = local_clock(); \
- if (!(ATOMIC)) \
- preempt_enable(); \
- /* Guarantee COND check prior to timeout */ \
- barrier(); \
- if (COND) { \
- ret = 0; \
- break; \
- } \
- if (now - base >= timeout) { \
- ret = -ETIMEDOUT; \
- break; \
- } \
- cpu_relax(); \
- if (!(ATOMIC)) { \
- preempt_disable(); \
- if (unlikely(cpu != smp_processor_id())) { \
- timeout -= now - base; \
- cpu = smp_processor_id(); \
- base = local_clock(); \
- } \
- } \
- } \
- ret; \
-})
-
-#define wait_for_us(COND, US) \
-({ \
- int ret__; \
- BUILD_BUG_ON(!__builtin_constant_p(US)); \
- if ((US) > 10) \
- ret__ = _wait_for((COND), (US), 10, 10); \
- else \
- ret__ = _wait_for_atomic((COND), (US), 0); \
- ret__; \
-})
-
-#define wait_for_atomic_us(COND, US) \
-({ \
- BUILD_BUG_ON(!__builtin_constant_p(US)); \
- BUILD_BUG_ON((US) > 50000); \
- _wait_for_atomic((COND), (US), 1); \
-})
-
-#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
-
-#define KHz(x) (1000 * (x))
-#define MHz(x) KHz(1000 * (x))
-
-#define KBps(x) (1000 * (x))
-#define MBps(x) KBps(1000 * (x))
-#define GBps(x) ((u64)1000 * MBps((x)))
-
/*
* Display related stuff
*/
-/* store information about an Ixxx DVO */
-/* The i830->i865 use multiple DVOs with multiple i2cs */
-/* the i915, i945 have a single sDVO i2c bus - which is different */
-#define MAX_OUTPUTS 6
-/* maximum connectors per crtcs in the mode set */
-
-#define INTEL_I2C_BUS_DVO 1
-#define INTEL_I2C_BUS_SDVO 2
-
/* these are outputs from the chip - integrated only
external chips are via DVO or SDVO output */
enum intel_output_type {
@@ -185,14 +67,6 @@ enum intel_output_type {
INTEL_OUTPUT_DP_MST = 11,
};
-#define INTEL_DVO_CHIP_NONE 0
-#define INTEL_DVO_CHIP_LVDS 1
-#define INTEL_DVO_CHIP_TMDS 2
-#define INTEL_DVO_CHIP_TVOUT 4
-
-#define INTEL_DSI_VIDEO_MODE 0
-#define INTEL_DSI_COMMAND_MODE 1
-
struct intel_framebuffer {
struct drm_framebuffer base;
struct intel_rotation_info rot_info;
@@ -677,21 +551,6 @@ struct intel_initial_plane_config {
u8 rotation;
};
-#define SKL_MIN_SRC_W 8
-#define SKL_MAX_SRC_W 4096
-#define SKL_MIN_SRC_H 8
-#define SKL_MAX_SRC_H 4096
-#define SKL_MIN_DST_W 8
-#define SKL_MAX_DST_W 4096
-#define SKL_MIN_DST_H 8
-#define SKL_MAX_DST_H 4096
-#define ICL_MAX_SRC_W 5120
-#define ICL_MAX_SRC_H 4096
-#define ICL_MAX_DST_W 5120
-#define ICL_MAX_DST_H 4096
-#define SKL_MIN_YUV_420_SRC_W 16
-#define SKL_MIN_YUV_420_SRC_H 16
-
struct intel_scaler {
int in_use;
u32 mode;
@@ -1581,56 +1440,6 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
&crtc->base));
}
-/* intel_fifo_underrun.c */
-bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool enable);
-bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
- enum pipe pch_transcoder,
- bool enable);
-void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
- enum pipe pch_transcoder);
-void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
-void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
-
-/* i915_irq.c */
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
-
-static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
- u32 mask)
-{
- return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
-}
-
-void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
-static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
-{
- /*
- * We only use drm_irq_uninstall() at unload and VT switch, so
- * this is the only thing we need to check.
- */
- return dev_priv->runtime_pm.irqs_enabled;
-}
-
-int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
- u8 pipe_mask);
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
- u8 pipe_mask);
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
-
/* intel_display.c */
void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -1652,6 +1461,7 @@ unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_i915_private *dev_priv);
void intel_mark_idle(struct drm_i915_private *dev_priv);
@@ -1763,18 +1573,13 @@ void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
-void hsw_enable_pc8(struct drm_i915_private *dev_priv);
-void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
-void bxt_enable_dc9(struct drm_i915_private *dev_priv);
-void bxt_disable_dc9(struct drm_i915_private *dev_priv);
-void gen9_enable_dc5(struct drm_i915_private *dev_priv);
unsigned int skl_cdclk_get_vco(unsigned int freq);
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
+void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
@@ -1816,99 +1621,26 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
-
-/* intel_dp_link_training.c */
-void intel_dp_start_link_train(struct intel_dp *intel_dp);
-void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-
-/* intel_vdsc.c */
-int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config);
-enum intel_display_power_domain
-intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
-
-/* intel_dp_aux_backlight.c */
-int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
-
-/* intel_dp_mst.c */
-int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
-void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-/* vlv_dsi.c */
-void vlv_dsi_init(struct drm_i915_private *dev_priv);
-
-/* icl_dsi.c */
-void icl_dsi_init(struct drm_i915_private *dev_priv);
-
-/* intel_dsi_dcs_backlight.c */
-int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
-
-/* intel_hotplug.c */
-void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
-bool intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector);
-
-/* intel_overlay.c */
-void intel_overlay_setup(struct drm_i915_private *dev_priv);
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
-int intel_overlay_switch_off(struct intel_overlay *overlay);
-int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void intel_overlay_reset(struct drm_i915_private *dev_priv);
-
-/* intel_quirks.c */
-void intel_init_quirks(struct drm_i915_private *dev_priv);
+int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
/* intel_runtime_pm.c */
-void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
-int intel_power_domains_init(struct drm_i915_private *);
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
-void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void icl_display_core_uninit(struct drm_i915_private *dev_priv);
-void intel_power_domains_enable(struct drm_i915_private *dev_priv);
-void intel_power_domains_disable(struct drm_i915_private *dev_priv);
-
-enum i915_drm_suspend_mode {
- I915_DRM_SUSPEND_IDLE,
- I915_DRM_SUSPEND_MEM,
- I915_DRM_SUSPEND_HIBERNATE,
-};
+#define BITS_PER_WAKEREF \
+ BITS_PER_TYPE(struct_member(struct i915_runtime_pm, wakeref_count))
+#define INTEL_RPM_WAKELOCK_SHIFT (BITS_PER_WAKEREF / 2)
+#define INTEL_RPM_WAKELOCK_BIAS (1 << INTEL_RPM_WAKELOCK_SHIFT)
+#define INTEL_RPM_RAW_WAKEREF_MASK (INTEL_RPM_WAKELOCK_BIAS - 1)
+
+static inline int
+intel_rpm_raw_wakeref_count(int wakeref_count)
+{
+ return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
+}
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
- enum i915_drm_suspend_mode);
-void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
-const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain);
-
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain,
- intel_wakeref_t wakeref);
-#else
-#define intel_display_power_put(i915, domain, wakeref) \
- intel_display_power_put_unchecked(i915, domain)
-#endif
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
- u8 req_slices);
+static inline int
+intel_rpm_wakelock_count(int wakeref_count)
+{
+ return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
+}
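
[With this hunk, wakeref_count is really two counters packed into one word: the low half counts raw wakerefs, the high half (from INTEL_RPM_WAKELOCK_SHIFT up) counts wakelocks, which is why disable/enable_rpm_wakeref_asserts() below add and subtract INTEL_RPM_WAKELOCK_BIAS + 1 — one fake wakelock plus one fake raw ref in a single atomic op. A self-contained illustration of the encoding (userspace C, assuming a 32-bit counter for simplicity):

    #include <stdio.h>

    #define WAKELOCK_SHIFT 16                  /* half of a 32-bit counter */
    #define WAKELOCK_BIAS  (1u << WAKELOCK_SHIFT)
    #define RAW_MASK       (WAKELOCK_BIAS - 1)

    int main(void)
    {
        unsigned int count = 0;

        count += WAKELOCK_BIAS + 1;   /* as in disable_rpm_wakeref_asserts() */
        count += 1;                   /* one more raw wakeref */

        /* prints "raw=2 wakelock=1" */
        printf("raw=%u wakelock=%u\n", count & RAW_MASK, count >> WAKELOCK_SHIFT);
        return 0;
    }
]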
static inline void
assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
@@ -1918,11 +1650,33 @@ assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
}
static inline void
-__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
+____assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count)
{
assert_rpm_device_not_suspended(rpm);
- WARN_ONCE(!atomic_read(&rpm->wakeref_count),
- "RPM wakelock ref not held during HW access");
+ WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
+ "RPM raw-wakeref not held\n");
+}
+
+static inline void
+____assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count)
+{
+ ____assert_rpm_raw_wakeref_held(rpm, wakeref_count);
+ WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
+ "RPM wakelock ref not held during HW access\n");
+}
+
+static inline void
+assert_rpm_raw_wakeref_held(struct drm_i915_private *i915)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+
+ ____assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
+}
+
+static inline void
+__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
+{
+ ____assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}
static inline void
@@ -1952,7 +1706,8 @@ assert_rpm_wakelock_held(struct drm_i915_private *i915)
static inline void
disable_rpm_wakeref_asserts(struct drm_i915_private *i915)
{
- atomic_inc(&i915->runtime_pm.wakeref_count);
+ atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
+ &i915->runtime_pm.wakeref_count);
}
/**
@@ -1969,77 +1724,8 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *i915)
static inline void
enable_rpm_wakeref_asserts(struct drm_i915_private *i915)
{
- atomic_dec(&i915->runtime_pm.wakeref_count);
-}
-
-intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
-
-#define with_intel_runtime_pm(i915, wf) \
- for ((wf) = intel_runtime_pm_get(i915); (wf); \
- intel_runtime_pm_put((i915), (wf)), (wf) = 0)
-
-#define with_intel_runtime_pm_if_in_use(i915, wf) \
- for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
- intel_runtime_pm_put((i915), (wf)), (wf) = 0)
-
-void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
-#else
-#define intel_runtime_pm_put(i915, wref) intel_runtime_pm_put_unchecked(i915)
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
- struct drm_printer *p);
-#else
-static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
- struct drm_printer *p)
-{
+ atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
+ &i915->runtime_pm.wakeref_count);
}
-#endif
-
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
- bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override);
-
-/* intel_atomic.c */
-int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
- const struct drm_connector_state *state,
- struct drm_property *property,
- u64 *val);
-int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
- struct drm_connector_state *state,
- struct drm_property *property,
- u64 val);
-int intel_digital_connector_atomic_check(struct drm_connector *conn,
- struct drm_connector_state *new_state);
-struct drm_connector_state *
-intel_digital_connector_duplicate_state(struct drm_connector *connector);
-
-struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
-void intel_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
-struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
-void intel_atomic_state_clear(struct drm_atomic_state *);
-
-static inline struct intel_crtc_state *
-intel_atomic_get_crtc_state(struct drm_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_crtc_state *crtc_state;
- crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
- if (IS_ERR(crtc_state))
- return ERR_CAST(crtc_state);
-
- return to_intel_crtc_state(crtc_state);
-}
-
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
#endif /* __INTEL_DRV_H__ */
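
[Most of the deletions from intel_drv.h above are declarations migrating into the small per-topic headers added later in this diff (intel_fifo_underrun.h, intel_gmbus.h, intel_hotplug.h, intel_lpe_audio.h, ...). Each new header follows the same template, sketched here with a hypothetical intel_example name:

    /* SPDX-License-Identifier: MIT */
    /*
     * Copyright © 2019 Intel Corporation
     */

    #ifndef __INTEL_EXAMPLE_H__
    #define __INTEL_EXAMPLE_H__

    #include <linux/types.h>          /* only what the prototypes need */

    struct drm_i915_private;          /* forward declaration, not i915_drv.h */

    void intel_example_init(struct drm_i915_private *dev_priv);

    #endif /* __INTEL_EXAMPLE_H__ */
]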
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 705a609050c0..f9b90061d912 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -28,6 +28,9 @@
#include <drm/drm_mipi_dsi.h>
#include "intel_drv.h"
+#define INTEL_DSI_VIDEO_MODE 0
+#define INTEL_DSI_COMMAND_MODE 1
+
/* Dual Link support */
#define DSI_DUAL_LINK_NONE 0
#define DSI_DUAL_LINK_FRONT_BACK 1
@@ -151,6 +154,9 @@ static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
return enc_to_intel_dsi(&encoder->base)->ports;
}
+/* icl_dsi.c */
+void icl_dsi_init(struct drm_i915_private *dev_priv);
+
/* intel_dsi.c */
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
@@ -166,6 +172,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
const struct mipi_dsi_host_ops *funcs,
enum port port);
+void vlv_dsi_init(struct drm_i915_private *dev_priv);
/* vlv_dsi_pll.c */
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
index 150a156f3b1e..8c33262cb0b2 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -23,11 +23,13 @@
* Author: Deepak M <m.deepak at intel.com>
*/
+#include <drm/drm_mipi_dsi.h>
+#include <video/mipi_display.h>
+
+#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
-#include "i915_drv.h"
-#include <video/mipi_display.h>
-#include <drm/drm_mipi_dsi.h>
+#include "intel_dsi_dcs_backlight.h"
#define CONTROL_DISPLAY_BCTRL (1 << 5)
#define CONTROL_DISPLAY_DD (1 << 3)
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.h b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.h
new file mode 100644
index 000000000000..eb01947843bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DSI_DCS_BACKLIGHT_H__
+#define __INTEL_DSI_DCS_BACKLIGHT_H__
+
+struct intel_connector;
+
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
+
+#endif /* __INTEL_DSI_DCS_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 4b8e48db1843..7cdde1d04f4b 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -24,18 +24,23 @@
*
*/
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/slab.h>
-#include <video/mipi_display.h>
+
#include <asm/intel-mid.h>
#include <asm/unaligned.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+
+#include <video/mipi_display.h>
+
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_sideband.h"
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
@@ -248,7 +253,7 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
pconf0 = VLV_GPIO_PCONF0(map->base_offset);
padval = VLV_GPIO_PAD_VAL(map->base_offset);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
if (!map->init) {
/* FIXME: remove constant below */
vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
@@ -257,7 +262,7 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
tmp = 0x4 | value;
vlv_iosf_sb_write(dev_priv, port, padval, tmp);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
}
static void chv_exec_gpio(struct drm_i915_private *dev_priv,
@@ -303,12 +308,12 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
vlv_iosf_sb_write(dev_priv, port, cfg0,
CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
CHV_GPIO_GPIOTXSTATE(value));
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
}
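
[Both GPIO helpers drop the bare sb_lock mutex in favour of the new intel_sideband.h API: vlv_iosf_sb_get()/vlv_iosf_sb_put() take a mask of IOSF units, so one acquisition can bracket any number of accesses and, via the mask, several units at once. The usage pattern, assuming the GPIO unit as above:

    vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));  /* lock + wake the unit */
    vlv_iosf_sb_write(dev_priv, port, cfg1, 0);        /* any number of accesses */
    vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));  /* release */
]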
static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index adef81c8cccb..22666d28f4aa 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -32,13 +32,19 @@
#include <drm/drm_crtc.h>
#include <drm/i915_drm.h>
-#include "dvo.h"
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_dvo.h"
+#include "intel_dvo_dev.h"
+#include "intel_gmbus.h"
#include "intel_panel.h"
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/intel_dvo_dev.h
index 16e0345b711f..94a6ae1e0292 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/intel_dvo_dev.h
@@ -20,12 +20,14 @@
* OF THIS SOFTWARE.
*/
-#ifndef _INTEL_DVO_H
-#define _INTEL_DVO_H
+#ifndef __INTEL_DVO_DEV_H__
+#define __INTEL_DVO_DEV_H__
#include <linux/i2c.h>
+
#include <drm/drm_crtc.h>
-#include "intel_drv.h"
+
+#include "i915_reg.h"
struct intel_dvo_device {
const char *name;
@@ -135,4 +137,4 @@ extern const struct intel_dvo_dev_ops tfp410_ops;
extern const struct intel_dvo_dev_ops ch7017_ops;
extern const struct intel_dvo_dev_ops ns2501_ops;
-#endif /* _INTEL_DVO_H */
+#endif /* __INTEL_DVO_DEV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 74c8b0528294..8545ad32bb50 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -28,6 +28,7 @@
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_fbc.h"
+#include "intel_fifo_underrun.h"
/**
* DOC: fifo underrun handling
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.h b/drivers/gpu/drm/i915/intel_fifo_underrun.h
new file mode 100644
index 000000000000..e04f22ac1f49
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FIFO_UNDERRUN_H__
+#define __INTEL_FIFO_UNDERRUN_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_i915_private;
+
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pch_transcoder,
+ bool enable);
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pch_transcoder);
+void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
+void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_FIFO_UNDERRUN_H__ */
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_gmbus.c
index 422685d120e9..969ce8b71e32 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_gmbus.c
@@ -26,13 +26,17 @@
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
+
#include <linux/export.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
#include <drm/drm_hdcp.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_gmbus.h"
struct gmbus_pin {
const char *name;
@@ -134,7 +138,7 @@ to_intel_gmbus(struct i2c_adapter *i2c)
}
void
-intel_i2c_reset(struct drm_i915_private *dev_priv)
+intel_gmbus_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE(GMBUS0, 0);
I915_WRITE(GMBUS4, 0);
@@ -256,7 +260,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- intel_i2c_reset(dev_priv);
+ intel_gmbus_reset(dev_priv);
if (IS_PINEVIEW(dev_priv))
pnv_gmbus_clock_gating(dev_priv, false);
@@ -577,8 +581,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
/* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
if (IS_GEN9_LP(dev_priv))
bxt_gmbus_clock_gating(dev_priv, false);
- else if (HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
pch_gmbus_clock_gating(dev_priv, false);
retry:
@@ -687,8 +690,7 @@ out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
if (IS_GEN9_LP(dev_priv))
bxt_gmbus_clock_gating(dev_priv, true);
- else if (HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
pch_gmbus_clock_gating(dev_priv, true);
return ret;
@@ -811,7 +813,7 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
* intel_gmbus_setup - instantiate all Intel i2c GMBuses
* @dev_priv: i915 device private
*/
-int intel_setup_gmbus(struct drm_i915_private *dev_priv)
+int intel_gmbus_setup(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_gmbus *bus;
@@ -872,7 +874,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
goto err;
}
- intel_i2c_reset(dev_priv);
+ intel_gmbus_reset(dev_priv);
return 0;
@@ -918,7 +920,14 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
mutex_unlock(&dev_priv->gmbus_mutex);
}
-void intel_teardown_gmbus(struct drm_i915_private *dev_priv)
+bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ return bus->force_bit;
+}
+
+void intel_gmbus_teardown(struct drm_i915_private *dev_priv)
{
struct intel_gmbus *bus;
unsigned int pin;
diff --git a/drivers/gpu/drm/i915/intel_gmbus.h b/drivers/gpu/drm/i915/intel_gmbus.h
new file mode 100644
index 000000000000..d989085b8d22
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gmbus.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GMBUS_H__
+#define __INTEL_GMBUS_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct i2c_adapter;
+
+int intel_gmbus_setup(struct drm_i915_private *dev_priv);
+void intel_gmbus_teardown(struct drm_i915_private *dev_priv);
+bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+ unsigned int pin);
+int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
+
+struct i2c_adapter *
+intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter);
+void intel_gmbus_reset(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_GMBUS_H__ */
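
[The intel_i2c.c -> intel_gmbus.c rename comes with a consistent intel_gmbus_ prefix for the entry points (intel_i2c_reset -> intel_gmbus_reset, intel_setup_gmbus -> intel_gmbus_setup, intel_teardown_gmbus -> intel_gmbus_teardown), and the new intel_gmbus_is_forced_bit() saves callers from reaching into struct intel_gmbus for force_bit. A usage sketch, assuming pin has already been validated with intel_gmbus_is_valid_pin():

    struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, pin);

    intel_gmbus_force_bit(adapter, true);        /* fall back to bit-banging */
    if (intel_gmbus_is_forced_bit(adapter))      /* query via the accessor */
        DRM_DEBUG_KMS("gmbus pin %u is in bit-bang mode\n", pin);
]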
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 3aabfa2d9198..c4ac29309fcc 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -154,7 +154,7 @@ int intel_guc_init_misc(struct intel_guc *guc)
void intel_guc_fini_misc(struct intel_guc *guc)
{
- intel_uc_fw_fini(&guc->fw);
+ intel_uc_fw_cleanup_fetch(&guc->fw);
guc_fini_wq(guc);
}
@@ -189,9 +189,13 @@ int intel_guc_init(struct intel_guc *guc)
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
- ret = guc_shared_data_create(guc);
+ ret = intel_uc_fw_init(&guc->fw);
if (ret)
goto err_fetch;
+
+ ret = guc_shared_data_create(guc);
+ if (ret)
+ goto err_fw;
GEM_BUG_ON(!guc->shared_data);
ret = intel_guc_log_create(&guc->log);
@@ -220,8 +224,10 @@ err_log:
intel_guc_log_destroy(&guc->log);
err_shared:
guc_shared_data_destroy(guc);
-err_fetch:
+err_fw:
intel_uc_fw_fini(&guc->fw);
+err_fetch:
+ intel_uc_fw_cleanup_fetch(&guc->fw);
return ret;
}
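
[intel_guc_init() now has a two-stage firmware lifecycle with matching unwind labels: intel_uc_fw_init() pins the image fetched earlier, intel_uc_fw_cleanup_fetch() releases what the fetch allocated, and each error label undoes exactly one stage. The resulting shape, condensed from the hunk above:

    ret = intel_uc_fw_init(&guc->fw);        /* stage 2: pin the fetched image */
    if (ret)
        goto err_fetch;                      /* unwind stage 1 (the fetch) only */

    ret = guc_shared_data_create(guc);       /* stage 3 */
    if (ret)
        goto err_fw;                         /* unwind stage 2, then stage 1 */
    /* ... */
    err_fw:
        intel_uc_fw_fini(&guc->fw);
    err_fetch:
        intel_uc_fw_cleanup_fetch(&guc->fw);
        return ret;
]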
@@ -238,6 +244,7 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);
intel_uc_fw_fini(&guc->fw);
+ intel_uc_fw_cleanup_fetch(&guc->fw);
}
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
@@ -721,3 +728,30 @@ u32 intel_guc_reserved_gtt_size(struct intel_guc *guc)
{
return guc_to_i915(guc)->wopcm.guc.size;
}
+
+int intel_guc_reserve_ggtt_top(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ u64 size;
+ int ret;
+
+ size = ggtt->vm.total - GUC_GGTT_TOP;
+
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
+ GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
+ PIN_NOEVICT);
+ if (ret)
+ DRM_DEBUG_DRIVER("GuC: failed to reserve top of ggtt\n");
+
+ return ret;
+}
+
+void intel_guc_release_ggtt_top(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ if (drm_mm_node_allocated(&ggtt->uc_fw))
+ drm_mm_remove_node(&ggtt->uc_fw);
+}
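
[intel_guc_reserve_ggtt_top() pins a placeholder drm_mm node over [GUC_GGTT_TOP, vm.total) so regular GGTT allocations can never land in the range the GuC cannot address; the drm_mm_node_allocated() check makes the release side safe to call even if the reserve failed or never ran. A sketch of the pairing (the -ENOSPC mapping is an assumption about the caller, not part of this API):

    if (intel_guc_reserve_ggtt_top(guc))   /* carve out the GuC-inaccessible top */
        return -ENOSPC;
    /* ... use the GGTT normally; nothing lands above GUC_GGTT_TOP ... */
    intel_guc_release_ggtt_top(guc);       /* no-op if the node was never allocated */
]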
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 2c59ff8d9f39..d4b015ab8a36 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -96,11 +96,6 @@ struct intel_guc {
void (*notify)(struct intel_guc *guc);
};
-static inline bool intel_guc_is_alive(struct intel_guc *guc)
-{
- return intel_uc_fw_is_loaded(&guc->fw);
-}
-
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
@@ -173,6 +168,13 @@ int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
u32 intel_guc_reserved_gtt_size(struct intel_guc *guc);
+int intel_guc_reserve_ggtt_top(struct intel_guc *guc);
+void intel_guc_release_ggtt_top(struct intel_guc *guc);
+
+static inline bool intel_guc_is_loaded(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_loaded(&guc->fw);
+}
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index f5e7f0663304..41ba593a4df7 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -96,4 +96,9 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);
+static inline void intel_guc_ct_stop(struct intel_guc_ct *ct)
+{
+ ct->host_channel.enabled = false;
+}
+
#endif /* _INTEL_GUC_CT_H_ */
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 792a551450c7..8b2dcc70b956 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -90,7 +90,7 @@ void intel_guc_fw_init_early(struct intel_guc *guc)
{
struct intel_uc_fw *guc_fw = &guc->fw;
- intel_uc_fw_init(guc_fw, INTEL_UC_FW_TYPE_GUC);
+ intel_uc_fw_init_early(guc_fw, INTEL_UC_FW_TYPE_GUC);
guc_fw_select(guc_fw);
}
@@ -122,14 +122,16 @@ static void guc_prepare_xfer(struct intel_guc *guc)
}
/* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+static void guc_xfer_rsa(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uc_fw *fw = &guc->fw;
+ struct sg_table *pages = fw->obj->mm.pages;
u32 rsa[UOS_RSA_SCRATCH_COUNT];
int i;
- sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
- rsa, sizeof(rsa), guc->fw.rsa_offset);
+ sg_pcopy_to_buffer(pages->sgl, pages->nents,
+ rsa, sizeof(rsa), fw->rsa_offset);
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
@@ -201,7 +203,7 @@ static int guc_wait_ucode(struct intel_guc *guc)
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.
*/
-static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+static int guc_xfer_ucode(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_uc_fw *guc_fw = &guc->fw;
@@ -214,7 +216,7 @@ static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
- offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
+ offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
@@ -233,7 +235,7 @@ static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
/*
* Load the GuC firmware blob into the MinuteIA.
*/
-static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
+static int guc_fw_xfer(struct intel_uc_fw *guc_fw)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -250,9 +252,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
- guc_xfer_rsa(guc, vma);
+ guc_xfer_rsa(guc);
- ret = guc_xfer_ucode(guc, vma);
+ ret = guc_xfer_ucode(guc);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 46cd0e70aecb..987ff586d7f9 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -24,8 +24,10 @@
#include <linux/circ_buf.h>
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_lrc_reg.h"
+
#include "intel_guc_submission.h"
-#include "intel_lrc_reg.h"
#include "i915_drv.h"
#define GUC_PREEMPT_FINISHED 0x1
@@ -362,11 +364,10 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
static void guc_stage_desc_init(struct intel_guc_client *client)
{
struct intel_guc *guc = client->guc;
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
struct i915_gem_context *ctx = client->owner;
+ struct i915_gem_engines_iter it;
struct guc_stage_desc *desc;
- unsigned int tmp;
+ struct intel_context *ce;
u32 gfx_addr;
desc = __get_stage_desc(client);
@@ -380,10 +381,11 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
desc->priority = client->priority;
desc->db_id = client->doorbell_id;
- for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
- struct intel_context *ce = intel_context_lookup(ctx, engine);
- u32 guc_engine_id = engine->guc_id;
- struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ struct guc_execlist_context *lrc;
+
+ if (!(ce->engine->mask & client->engines))
+ continue;
/* TODO: We have a design issue to be solved here. Only when we
* receive the first batch, we know which engine is used by the
@@ -392,7 +394,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
- if (!ce || !ce->state)
+ if (!ce->state)
break; /* XXX: continue? */
/*
@@ -402,6 +404,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
* Instead, the GuC uses the LRCA of the user mode context (see
* guc_add_request below).
*/
+ lrc = &desc->lrc[ce->engine->guc_id];
lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */
@@ -412,15 +415,16 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
* here. In proxy submission, it wants the stage id
*/
lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
- (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
+ (ce->engine->guc_id << GUC_ELC_ENGINE_OFFSET);
lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
- desc->engines_used |= (1 << guc_engine_id);
+ desc->engines_used |= BIT(ce->engine->guc_id);
}
+ i915_gem_context_unlock_engines(ctx);
DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
client->engines, desc->engines_used);
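
[guc_stage_desc_init() switches from for_each_engine_masked() over the device to for_each_gem_engine() over the context's own engine array, filtering against client->engines instead of looking up a context per engine; the iterator takes the engines lock, hence the explicit unlock once the loop is done. The skeleton of the new loop:

    struct i915_gem_engines_iter it;
    struct intel_context *ce;

    for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
        if (!(ce->engine->mask & client->engines))
            continue;                     /* engine not owned by this client */
        /* ... per-engine setup keyed on ce->engine->guc_id ... */
    }
    i915_gem_context_unlock_engines(ctx); /* pairs with lock_engines() above */
]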
@@ -742,7 +746,8 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
&engine->i915->guc.preempt_work[engine->id];
int prio = execlists->queue_priority_hint;
- if (__execlists_need_preempt(prio, port_prio(port))) {
+ if (i915_scheduler_need_preempt(prio,
+ port_prio(port))) {
execlists_set_active(execlists,
EXECLISTS_ACTIVE_PREEMPT);
queue_work(engine->i915->guc.preempt_wq,
@@ -1194,7 +1199,7 @@ static void __guc_client_disable(struct intel_guc_client *client)
* the case, instead of trying (in vain) to communicate with it, let's
* just cleanup the doorbell HW and our internal state.
*/
- if (intel_guc_is_alive(client->guc))
+ if (intel_guc_is_loaded(client->guc))
destroy_doorbell(client);
else
__fini_doorbell(client);
@@ -1359,6 +1364,7 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_submission_park(struct intel_engine_cs *engine)
{
+ intel_engine_park(engine);
intel_engine_unpin_breadcrumbs_irq(engine);
engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
index aa5e6749c925..7d823a513b9c 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -27,9 +27,10 @@
#include <linux/spinlock.h>
+#include "gt/intel_engine_types.h"
+
#include "i915_gem.h"
#include "i915_selftest.h"
-#include "intel_engine_types.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 99b007169c49..bc3a94d491c4 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -16,6 +16,7 @@
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
+#include "intel_sideband.h"
#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
@@ -78,7 +79,7 @@ bool intel_hdcp_capable(struct intel_connector *connector)
}
/* Is HDCP2.2 capable on Platform and Sink */
-static bool intel_hdcp2_capable(struct intel_connector *connector)
+bool intel_hdcp2_capable(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
@@ -213,10 +214,8 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
* from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
*/
if (IS_GEN9_BC(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv,
SKL_PCODE_LOAD_HDCP_KEYS, 1);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
ret);
@@ -492,9 +491,11 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Implements Part 2 of the HDCP authorization procedure */
static
-int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
- const struct intel_hdcp_shim *shim)
+int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+ struct drm_device *dev = connector->base.dev;
u8 bstatus[2], num_downstream, *ksv_fifo;
int ret, i, tries = 3;
@@ -533,6 +534,11 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
if (ret)
goto err;
+ if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
+ DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
+ return -EPERM;
+ }
+
/*
* When V prime mismatches, DP Spec mandates re-read of
* V prime at least twice.
@@ -559,9 +565,12 @@ err:
}
/* Implements Part 1 of the HDCP authorization procedure */
-static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
- const struct intel_hdcp_shim *shim)
+static int intel_hdcp_auth(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_device *dev = connector->base.dev;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
struct drm_i915_private *dev_priv;
enum port port;
unsigned long r0_prime_gen_start;
@@ -627,6 +636,11 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
if (ret < 0)
return ret;
+ if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
+ DRM_ERROR("BKSV is revoked\n");
+ return -EPERM;
+ }
+
I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
@@ -700,7 +714,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
*/
if (repeater_present)
- return intel_hdcp_auth_downstream(intel_dig_port, shim);
+ return intel_hdcp_auth_downstream(connector);
DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
return 0;
@@ -763,7 +777,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
/* In case of authentication failures, HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
- ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
+ ret = intel_hdcp_auth(connector);
if (!ret) {
hdcp->hdcp_encrypted = true;
return 0;
@@ -1162,6 +1176,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_device *dev = connector->base.dev;
union {
struct hdcp2_ake_init ake_init;
struct hdcp2_ake_send_cert send_cert;
@@ -1196,6 +1211,12 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
+ if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
+ 1)) {
+ DRM_ERROR("Receiver ID is revoked\n");
+ return -EPERM;
+ }
+
/*
* Here msgs.no_stored_km will hold msgs corresponding to the km
* stored also.
@@ -1306,7 +1327,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
/* Prepare RepeaterAuth_Stream_Manage msg */
msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
- drm_hdcp2_u32_to_seq_num(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
+ drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
/* K no of streams is fixed as 1. Stored as big-endian. */
msgs.stream_manage.k = cpu_to_be16(1);
@@ -1348,13 +1369,14 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_device *dev = connector->base.dev;
union {
struct hdcp2_rep_send_receiverid_list recvid_list;
struct hdcp2_rep_send_ack rep_ack;
} msgs;
const struct intel_hdcp_shim *shim = hdcp->shim;
+ u32 seq_num_v, device_cnt;
u8 *rx_info;
- u32 seq_num_v;
int ret;
ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
@@ -1371,7 +1393,8 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
}
/* Converting and Storing the seq_num_v to local variable as DWORD */
- seq_num_v = drm_hdcp2_seq_num_to_u32(msgs.recvid_list.seq_num_v);
+ seq_num_v =
+ drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
if (seq_num_v < hdcp->seq_num_v) {
/* Roll over of the seq_num_v from repeater. Reauthenticate. */
@@ -1379,6 +1402,14 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
return -EINVAL;
}
+ device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+ if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
+ device_cnt)) {
+ DRM_ERROR("Revoked receiver ID(s) is in list\n");
+ return -EPERM;
+ }
+
ret = hdcp2_verify_rep_topology_prepare_ack(connector,
&msgs.recvid_list,
&msgs.rep_ack);
diff --git a/drivers/gpu/drm/i915/intel_hdcp.h b/drivers/gpu/drm/i915/intel_hdcp.h
index a75f25f09d39..be8da85c866a 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/intel_hdcp.h
@@ -25,6 +25,7 @@ int intel_hdcp_enable(struct intel_connector *connector);
int intel_hdcp_disable(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
+bool intel_hdcp2_capable(struct intel_connector *connector);
void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
void intel_hdcp_cleanup(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 34be2cfd0ec8..a0b98a0178f6 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -39,17 +39,24 @@
#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
+#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
+#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_sdvo.h"
#include "intel_panel.h"
+#include "intel_sideband.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
{
@@ -846,19 +853,6 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
&crtc_state->infoframes.hdmi);
}
-static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
-{
- struct drm_connector *connector = conn_state->connector;
-
- /*
- * HDMI cloning is only supported on g4x which doesn't
- * support deep color or GCP infoframes anyway so no
- * need to worry about multiple HDMI sinks here.
- */
-
- return connector->display_info.bpc > 8;
-}
-
/*
* Determine if default_phase=1 can be indicated in the GCP infoframe.
*
@@ -963,8 +957,8 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL);
- /* Indicate color depth whenever the sink supports deep color */
- if (hdmi_sink_is_deep_color(conn_state))
+ /* Set the GCP color indication for deep color modes */
+ if (crtc_state->pipe_bpp > 24)
crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION;
/* Enable default_phase whenever the display mode is suitably aligned */
@@ -2162,7 +2156,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
return false;
- if (crtc_state->pipe_bpp <= 8*3)
+ if (crtc_state->pipe_bpp < bpc * 3)
return false;
if (!crtc_state->has_hdmi_sink)
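
[The GCP color-indication test now keys off the programmed pipe depth rather than the sink's capability: pipe_bpp counts bits per pixel across three channels (8 bpc -> 24, 10 bpc -> 30, 12 bpc -> 36), so pipe_bpp > 24 means exactly "more than 8 bpc in use". The same unit explains the hdmi_deep_color_possible() change, which now rejects the candidate bpc unless pipe_bpp reaches bpc * 3 instead of using the old hard-coded 8 bpc floor. The arithmetic, for reference:

    /* pipe_bpp is bits per pixel over 3 channels */
    if (crtc_state->pipe_bpp > 24)      /* > 8 bpc => deep color active */
        crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION;
]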
@@ -2620,12 +2614,12 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
}
static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index b8937c788f03..ff9eb3c855d3 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_hotplug.h"
/**
* DOC: Hotplug
diff --git a/drivers/gpu/drm/i915/intel_hotplug.h b/drivers/gpu/drm/i915/intel_hotplug.h
new file mode 100644
index 000000000000..805f897dbb7a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hotplug.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HOTPLUG_H__
+#define __INTEL_HOTPLUG_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_i915_private;
+struct intel_connector;
+struct intel_encoder;
+
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
+bool intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector);
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+ enum port port);
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+
+#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 94c04f16a2ad..1ff1fb015e58 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -40,6 +40,61 @@ int intel_huc_init_misc(struct intel_huc *huc)
return 0;
}
+static int intel_huc_rsa_data_create(struct intel_huc *huc)
+{
+ struct drm_i915_private *i915 = huc_to_i915(huc);
+ struct intel_guc *guc = &i915->guc;
+ struct i915_vma *vma;
+ void *vaddr;
+
+ /*
+ * HuC firmware will sit above GUC_GGTT_TOP and will not map
+ * through GTT. Unfortunately, this means GuC cannot perform
+ * the HuC authentication, as the RSA offset now falls within the GuC
+ * inaccessible range. We resort to perma-pinning an additional
+ * vma within the accessible range that only contains the rsa
+ * signature. The GuC can use this extra pinning to perform
+ * the authentication since its GGTT offset will be GuC
+ * accessible.
+ */
+ vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma, 0);
+ return PTR_ERR(vaddr);
+ }
+
+ huc->rsa_data = vma;
+ huc->rsa_data_vaddr = vaddr;
+
+ return 0;
+}
+
+static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
+{
+ i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP);
+}
+
+int intel_huc_init(struct intel_huc *huc)
+{
+ int err;
+
+ err = intel_huc_rsa_data_create(huc);
+ if (err)
+ return err;
+
+ return intel_uc_fw_init(&huc->fw);
+}
+
+void intel_huc_fini(struct intel_huc *huc)
+{
+ intel_uc_fw_fini(&huc->fw);
+ intel_huc_rsa_data_destroy(huc);
+}
+
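
[The create/destroy pair above is balanced against the CPU mapping too: the vaddr from i915_gem_object_pin_map() is kept around for huc_xfer_rsa() (in intel_huc_fw.c below) to fill at upload time, and passing I915_VMA_RELEASE_MAP to i915_vma_unpin_and_release() drops that mapping together with the vma. The lifetime, condensed:

    vma = intel_guc_allocate_vma(guc, PAGE_SIZE);            /* GuC-addressable page */
    vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);  /* kept mapped until fini */
    /* ... huc_xfer_rsa() copies the signature into vaddr at upload time ... */
    i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP); /* unmaps too */
]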
/**
* intel_huc_auth() - Authenticate HuC uCode
* @huc: intel_huc structure
@@ -55,27 +110,17 @@ int intel_huc_auth(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_i915(huc);
struct intel_guc *guc = &i915->guc;
- struct i915_vma *vma;
u32 status;
int ret;
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return -ENOEXEC;
- vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | i915->ggtt.pin_bias);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
- goto fail;
- }
-
ret = intel_guc_auth_huc(guc,
- intel_guc_ggtt_offset(guc, vma) +
- huc->fw.rsa_offset);
+ intel_guc_ggtt_offset(guc, huc->rsa_data));
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
- goto fail_unpin;
+ goto fail;
}
/* Check authentication status, it should be done by now */
@@ -86,14 +131,11 @@ int intel_huc_auth(struct intel_huc *huc)
2, 50, &status);
if (ret) {
DRM_ERROR("HuC: Firmware not verified %#x\n", status);
- goto fail_unpin;
+ goto fail;
}
- i915_vma_unpin(vma);
return 0;
-fail_unpin:
- i915_vma_unpin(vma);
fail:
huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index 7e41d870b509..a0c21ae02a99 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -33,16 +33,20 @@ struct intel_huc {
struct intel_uc_fw fw;
/* HuC-specific additions */
+ struct i915_vma *rsa_data;
+ void *rsa_data_vaddr;
};
void intel_huc_init_early(struct intel_huc *huc);
int intel_huc_init_misc(struct intel_huc *huc);
+int intel_huc_init(struct intel_huc *huc);
+void intel_huc_fini(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
int intel_huc_check_status(struct intel_huc *huc);
static inline void intel_huc_fini_misc(struct intel_huc *huc)
{
- intel_uc_fw_fini(&huc->fw);
+ intel_uc_fw_cleanup_fetch(&huc->fw);
}
static inline int intel_huc_sanitize(struct intel_huc *huc)
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 68d47c105939..44c559526072 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -89,22 +89,28 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
{
struct intel_uc_fw *huc_fw = &huc->fw;
- intel_uc_fw_init(huc_fw, INTEL_UC_FW_TYPE_HUC);
+ intel_uc_fw_init_early(huc_fw, INTEL_UC_FW_TYPE_HUC);
huc_fw_select(huc_fw);
}
-/**
- * huc_fw_xfer() - DMA's the firmware
- * @huc_fw: the firmware descriptor
- * @vma: the firmware image (bound into the GGTT)
- *
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Return: 0 on success, non-zero on failure
- */
-static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
+static void huc_xfer_rsa(struct intel_huc *huc)
{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+ struct intel_uc_fw *fw = &huc->fw;
+ struct sg_table *pages = fw->obj->mm.pages;
+
+ /*
+ * HuC firmware image is outside the GuC-accessible range.
+ * Copy the RSA signature out of the image into the
+ * perma-pinned region set aside for it.
+ */
+ sg_pcopy_to_buffer(pages->sgl, pages->nents,
+ huc->rsa_data_vaddr, fw->rsa_size,
+ fw->rsa_offset);
+}
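
sg_pcopy_to_buffer() does a bounded copy out of a scatter-gather list into a linear buffer, starting at a byte offset into the list, and returns the number of bytes actually copied. A minimal sketch of the call as used above; the short-copy check is an illustration, not part of this patch:

    size_t copied;

    /* copy rsa_size bytes, starting rsa_offset into the fw image */
    copied = sg_pcopy_to_buffer(pages->sgl, pages->nents,
                                huc->rsa_data_vaddr, fw->rsa_size,
                                fw->rsa_offset);
    if (copied < fw->rsa_size)
        DRM_ERROR("HuC: short RSA copy (%zu bytes)\n", copied);
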
+
+static int huc_xfer_ucode(struct intel_huc *huc)
+{
+ struct intel_uc_fw *huc_fw = &huc->fw;
struct drm_i915_private *dev_priv = huc_to_i915(huc);
struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long offset = 0;
@@ -116,7 +122,7 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
- offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
+ offset = intel_uc_fw_ggtt_offset(huc_fw) +
huc_fw->header_offset;
intel_uncore_write(uncore, DMA_ADDR_0_LOW,
lower_32_bits(offset));
@@ -151,6 +157,23 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
}
/**
+ * huc_fw_xfer() - DMAs the firmware
+ * @huc_fw: the firmware descriptor
+ *
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int huc_fw_xfer(struct intel_uc_fw *huc_fw)
+{
+ struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+
+ huc_xfer_rsa(huc);
+
+ return huc_xfer_ucode(huc);
+}
+
+/**
* intel_huc_fw_upload() - load HuC uCode to device
* @huc: intel_huc structure
*
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index f8239bca3820..b19800b58442 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -61,16 +61,18 @@
*/
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/pci.h>
-#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
-#include "i915_drv.h"
-#include <linux/delay.h>
#include <drm/intel_lpe_audio.h>
+#include "i915_drv.h"
+#include "intel_lpe_audio.h"
+
#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
static struct platform_device *
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.h b/drivers/gpu/drm/i915/intel_lpe_audio.h
new file mode 100644
index 000000000000..f848c5038714
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LPE_AUDIO_H__
+#define __INTEL_LPE_AUDIO_H__
+
+#include <linux/types.h>
+
+enum pipe;
+enum port;
+struct drm_i915_private;
+
+int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ const void *eld, int ls_clock, bool dp_output);
+
+#endif /* __INTEL_LPE_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 51d1d59c1619..efefed62a7f8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -40,8 +40,10 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
+#include "intel_gmbus.h"
#include "intel_lvds.h"
#include "intel_panel.h"
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index eb317759b5d3..b64b45d9b538 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,13 +25,15 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
-#include <drm/i915_drm.h>
+
#include <drm/drm_fourcc.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
+#include "intel_overlay.h"
/* Limits for overlay size. According to intel doc, the real limits are:
* Y width: 4095, UV width (planar): 2047, Y height: 2047,
@@ -235,10 +237,9 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = dev_priv->engine[RCS0];
+ struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
- return i915_request_alloc(engine, dev_priv->kernel_context);
+ return i915_request_create(engine->kernel_context);
}
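
i915_request_create() takes an intel_context directly (here the engine's kernel context) instead of an engine plus a GEM context. A sketch of the usual lifecycle around it; the emit step stands in for whatever the overlay actually writes:

    struct i915_request *rq;

    rq = i915_request_create(engine->kernel_context);
    if (IS_ERR(rq))
        return PTR_ERR(rq);

    /* ... emit commands into the ring ... */

    i915_request_add(rq);  /* submit; the request now owns itself */
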
/* overlay needs to be disabled in OCMD reg */
diff --git a/drivers/gpu/drm/i915/intel_overlay.h b/drivers/gpu/drm/i915/intel_overlay.h
new file mode 100644
index 000000000000..a167c28acd27
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_overlay.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_OVERLAY_H__
+#define __INTEL_OVERLAY_H__
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_error_state_buf;
+struct drm_i915_private;
+struct intel_overlay;
+struct intel_overlay_error_state;
+
+void intel_overlay_setup(struct drm_i915_private *dev_priv);
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void intel_overlay_reset(struct drm_i915_private *dev_priv);
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
+void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
+ struct intel_overlay_error_state *error);
+
+#endif /* __INTEL_OVERLAY_H__ */
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4ab4ce6569e7..9cd4e37e3934 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -35,7 +35,9 @@
#include <linux/pwm.h>
#include "intel_connector.h"
+#include "intel_dp_aux_backlight.h"
#include "intel_drv.h"
+#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
#define CRC_PMIC_PWM_PERIOD_NS 21333
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index e7c7be4911c1..1e2c4307d05a 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -29,6 +29,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include "intel_atomic.h"
#include "intel_drv.h"
#include "intel_pipe_crc.h"
@@ -313,17 +314,8 @@ retry:
if (IS_HASWELL(dev_priv) &&
pipe_config->base.active && crtc->pipe == PIPE_A &&
- pipe_config->cpu_transcoder == TRANSCODER_EDP) {
- bool old_need_power_well = pipe_config->pch_pfit.enabled ||
- pipe_config->pch_pfit.force_thru;
- bool new_need_power_well = pipe_config->pch_pfit.enabled ||
- enable;
-
- pipe_config->pch_pfit.force_thru = enable;
-
- if (old_need_power_well != new_need_power_well)
- pipe_config->base.connectors_changed = true;
- }
+ pipe_config->cpu_transcoder == TRANSCODER_EDP)
+ pipe_config->base.mode_changed = true;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.h b/drivers/gpu/drm/i915/intel_pipe_crc.h
index 81eaf1854788..db258a756fc6 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.h
@@ -9,9 +9,11 @@
#include <linux/types.h>
struct drm_crtc;
+struct drm_i915_private;
struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_i915_private *dev_priv);
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source_name, size_t *values_cnt);
@@ -20,6 +22,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
+static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#define intel_crtc_set_crc_source NULL
#define intel_crtc_verify_crc_source NULL
#define intel_crtc_get_crc_sources NULL
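
The header mixes two compile-out idioms: a static inline no-op for functions that are called unconditionally, and NULL defines for functions only ever assigned to an ops table (so the core sees a NULL pointer and skips the feature). A generic sketch of the pattern, not tied to this file:

    #ifdef CONFIG_DEBUG_FS
    void foo_debugfs_init(struct foo *foo);
    int foo_set_source(struct foo *foo, const char *name);
    #else
    static inline void foo_debugfs_init(struct foo *foo) {}
    #define foo_set_source NULL /* assigned to an ops table */
    #endif
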
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 44be676fabd6..decdd79c3805 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -34,10 +34,13 @@
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_atomic.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_pm.h"
#include "intel_sprite.h"
+#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
/**
@@ -317,7 +320,7 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if (enable)
@@ -332,14 +335,14 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (enable)
@@ -348,7 +351,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
val &= ~DSP_MAXFIFO_PM5_ENABLE;
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
}
#define FW_WM(value, plane) \
@@ -675,7 +678,7 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
{
u64 ret;
- ret = (u64)pixel_rate * cpp * latency;
+ ret = mul_u32_u32(pixel_rate, cpp * latency);
ret = DIV_ROUND_UP_ULL(ret, 10000);
return ret;
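
mul_u32_u32() (from <linux/math64.h>) multiplies two u32 values into a u64; on 32-bit targets it lets the compiler emit a single 32x32->64 multiply instead of a full 64x64 one. Its generic fallback is essentially the following, which assumes (as here) that cpp * latency itself fits in 32 bits:

    static inline u64 mul_u32_u32(u32 a, u32 b)
    {
        return (u64)a * b;
    }
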
@@ -2817,11 +2820,9 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
@@ -2838,11 +2839,9 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val);
- mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
return;
@@ -3677,13 +3676,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
return 0;
DRM_DEBUG_KMS("Enabling SAGV\n");
- mutex_lock(&dev_priv->pcu_lock);
-
ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
- mutex_unlock(&dev_priv->pcu_lock);
/*
* Some skl systems, pre-release machines in particular,
@@ -3714,15 +3710,11 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
return 0;
DRM_DEBUG_KMS("Disabling SAGV\n");
- mutex_lock(&dev_priv->pcu_lock);
-
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
1);
- mutex_unlock(&dev_priv->pcu_lock);
-
/*
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
@@ -3763,14 +3755,16 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
sagv_block_time_us = 10;
/*
- * SKL+ workaround: bspec recommends we disable SAGV when we have
- * more then one pipe enabled
- *
* If there are no active CRTCs, no additional checks need be performed
*/
if (hweight32(intel_state->active_crtcs) == 0)
return true;
- else if (hweight32(intel_state->active_crtcs) > 1)
+
+ /*
+ * SKL+ workaround: bspec recommends we disable SAGV when we have
+ * more than one pipe enabled
+ */
+ if (hweight32(intel_state->active_crtcs) > 1)
return false;
/* Since we're now guaranteed to only have one active CRTC... */
@@ -4370,15 +4364,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- if (INTEL_GEN(dev_priv) < 11)
+ if (INTEL_GEN(dev_priv) >= 11)
+ total_data_rate =
+ icl_get_total_relative_data_rate(cstate,
+ plane_data_rate);
+ else
total_data_rate =
skl_get_total_relative_data_rate(cstate,
plane_data_rate,
uv_plane_data_rate);
- else
- total_data_rate =
- icl_get_total_relative_data_rate(cstate,
- plane_data_rate);
+
skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
ddb, alloc, &num_active);
@@ -4787,9 +4782,11 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
return;
}
- /* Display WA #1141: kbl,cfl */
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
- IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) &&
+ /*
+ * WaIncreaseLatencyIPCEnabled: kbl,cfl
+ * Display WA #1141: kbl,cfl
+ */
+ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
dev_priv->ipc_enabled)
latency += 4;
@@ -6139,7 +6136,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (val & DSP_MAXFIFO_PM5_ENABLE)
@@ -6169,7 +6166,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->level = VLV_WM_LEVEL_DDR_DVFS;
}
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -6378,16 +6375,25 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
I915_WRITE(DISP_ARB_CTL2, val);
}
+static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
+{
+ /* Display WA #0477 WaDisableIPC: skl */
+ if (IS_SKYLAKE(dev_priv))
+ return false;
+
+ /* Display WA #1141: SKL:all KBL:all CFL */
+ if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ return dev_priv->dram_info.symmetric_memory;
+
+ return true;
+}
+
void intel_init_ipc(struct drm_i915_private *dev_priv)
{
if (!HAS_IPC(dev_priv))
return;
- /* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
- else
- dev_priv->ipc_enabled = true;
+ dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
intel_enable_ipc(dev_priv);
}
@@ -6743,7 +6749,9 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
if (val != dev_priv->gt_pm.rps.cur_freq) {
+ vlv_punit_get(dev_priv);
err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+ vlv_punit_put(dev_priv);
if (err)
return err;
@@ -6796,7 +6804,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&rps->lock);
if (rps->enabled) {
u8 freq;
@@ -6819,7 +6827,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
rps->max_freq_softlimit)))
DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
}
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&rps->lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -6833,7 +6841,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
*/
gen6_disable_rps_interrupts(dev_priv);
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&rps->lock);
if (rps->enabled) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_set_rps_idle(dev_priv);
@@ -6843,7 +6851,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK,
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&rps->lock);
}
void gen6_rps_boost(struct i915_request *rq)
@@ -6883,7 +6891,7 @@ int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
struct intel_rps *rps = &dev_priv->gt_pm.rps;
int err;
- lockdep_assert_held(&dev_priv->pcu_lock);
+ lockdep_assert_held(&rps->lock);
GEM_BUG_ON(val > rps->max_freq);
GEM_BUG_ON(val < rps->min_freq);
@@ -7013,8 +7021,10 @@ static bool sanitize_rc6(struct drm_i915_private *i915)
struct intel_device_info *info = mkwrite_device_info(i915);
/* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(i915))
+ if (intel_vgpu_active(i915)) {
info->has_rc6 = 0;
+ info->has_rps = false;
+ }
if (info->has_rc6 &&
IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
@@ -7454,7 +7464,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
unsigned int max_gpu_freq, min_gpu_freq;
struct cpufreq_policy *policy;
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
+ lockdep_assert_held(&rps->lock);
if (rps->max_freq <= rps->min_freq)
return;
@@ -7753,6 +7763,11 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
valleyview_setup_pctx(dev_priv);
+ vlv_iosf_sb_get(dev_priv,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
vlv_init_gpll_ref_freq(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -7790,6 +7805,11 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, rps->min_freq),
rps->min_freq);
+
+ vlv_iosf_sb_put(dev_priv,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
}
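
vlv_iosf_sb_get()/vlv_iosf_sb_put() take a bitmask of sideband ports, so a whole init sequence can run under a single acquisition instead of locking around every access. A sketch of the pattern, using register names from this diff:

    vlv_iosf_sb_get(dev_priv,
                    BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_CCK));

    val  = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
    fuse = vlv_cck_read(dev_priv, CCK_FUSE_REG);

    vlv_iosf_sb_put(dev_priv,
                    BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_CCK));
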
static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
@@ -7799,11 +7819,14 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
cherryview_setup_pctx(dev_priv);
+ vlv_iosf_sb_get(dev_priv,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
vlv_init_gpll_ref_freq(dev_priv);
- mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
- mutex_unlock(&dev_priv->sb_lock);
switch ((val >> 2) & 0x7) {
case 3:
@@ -7836,6 +7859,11 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
intel_gpu_freq(dev_priv, rps->min_freq),
rps->min_freq);
+ vlv_iosf_sb_put(dev_priv,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
rps->min_freq) & 1,
"Odd GPU freq values\n");
@@ -7923,13 +7951,15 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RP_DOWN_IDLE_AVG);
/* Setting Fixed Bias */
- val = VLV_OVERRIDE_EN |
- VLV_SOC_TDP_EN |
- CHV_BIAS_CPU_50_SOC_50;
+ vlv_punit_get(dev_priv);
+
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(dev_priv);
+
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
@@ -8006,14 +8036,16 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
+ vlv_punit_get(dev_priv);
+
/* Setting Fixed Bias */
- val = VLV_OVERRIDE_EN |
- VLV_SOC_TDP_EN |
- VLV_BIAS_CPU_125_SOC_875;
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(dev_priv);
+
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
@@ -8517,8 +8549,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
pm_runtime_get(&dev_priv->drm.pdev->dev);
}
- mutex_lock(&dev_priv->pcu_lock);
-
/* Initialize RPS limits (for userspace) */
if (IS_CHERRYVIEW(dev_priv))
cherryview_init_gt_powersave(dev_priv);
@@ -8528,18 +8558,9 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
gen6_init_rps_frequencies(dev_priv);
/* Derive initial user preferences/limits from the hardware limits */
- rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
-
rps->max_freq_softlimit = rps->max_freq;
rps->min_freq_softlimit = rps->min_freq;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- rps->min_freq_softlimit =
- max_t(int,
- rps->efficient_freq,
- intel_freq_opcode(dev_priv, 450));
-
/* After setting max-softlimit, find the overclock max freq */
if (IS_GEN(dev_priv, 6) ||
IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
@@ -8556,8 +8577,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
/* Finally allow us to boost to max by default */
rps->boost_freq = rps->max_freq;
-
- mutex_unlock(&dev_priv->pcu_lock);
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -8583,7 +8604,7 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
{
- lockdep_assert_held(&i915->pcu_lock);
+ lockdep_assert_held(&i915->gt_pm.rps.lock);
if (!i915->gt_pm.llc_pstate.enabled)
return;
@@ -8595,7 +8616,7 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
static void intel_disable_rc6(struct drm_i915_private *dev_priv)
{
- lockdep_assert_held(&dev_priv->pcu_lock);
+ lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
if (!dev_priv->gt_pm.rc6.enabled)
return;
@@ -8614,7 +8635,7 @@ static void intel_disable_rc6(struct drm_i915_private *dev_priv)
static void intel_disable_rps(struct drm_i915_private *dev_priv)
{
- lockdep_assert_held(&dev_priv->pcu_lock);
+ lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
if (!dev_priv->gt_pm.rps.enabled)
return;
@@ -8635,19 +8656,19 @@ static void intel_disable_rps(struct drm_i915_private *dev_priv)
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&dev_priv->gt_pm.rps.lock);
intel_disable_rc6(dev_priv);
intel_disable_rps(dev_priv);
if (HAS_LLC(dev_priv))
intel_disable_llc_pstate(dev_priv);
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&dev_priv->gt_pm.rps.lock);
}
static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
{
- lockdep_assert_held(&i915->pcu_lock);
+ lockdep_assert_held(&i915->gt_pm.rps.lock);
if (i915->gt_pm.llc_pstate.enabled)
return;
@@ -8659,7 +8680,7 @@ static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
static void intel_enable_rc6(struct drm_i915_private *dev_priv)
{
- lockdep_assert_held(&dev_priv->pcu_lock);
+ lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
if (dev_priv->gt_pm.rc6.enabled)
return;
@@ -8684,7 +8705,7 @@ static void intel_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- lockdep_assert_held(&dev_priv->pcu_lock);
+ lockdep_assert_held(&rps->lock);
if (rps->enabled)
return;
@@ -8719,15 +8740,16 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
if (intel_vgpu_active(dev_priv))
return;
- mutex_lock(&dev_priv->pcu_lock);
+ mutex_lock(&dev_priv->gt_pm.rps.lock);
if (HAS_RC6(dev_priv))
intel_enable_rc6(dev_priv);
- intel_enable_rps(dev_priv);
+ if (HAS_RPS(dev_priv))
+ intel_enable_rps(dev_priv);
if (HAS_LLC(dev_priv))
intel_enable_llc_pstate(dev_priv);
- mutex_unlock(&dev_priv->pcu_lock);
+ mutex_unlock(&dev_priv->gt_pm.rps.lock);
}
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9698,221 +9720,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
}
}
-static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
-{
- u32 flags =
- I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
-
- switch (flags) {
- case GEN6_PCODE_SUCCESS:
- return 0;
- case GEN6_PCODE_UNIMPLEMENTED_CMD:
- return -ENODEV;
- case GEN6_PCODE_ILLEGAL_CMD:
- return -ENXIO;
- case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- return -EOVERFLOW;
- case GEN6_PCODE_TIMEOUT:
- return -ETIMEDOUT;
- default:
- MISSING_CASE(flags);
- return 0;
- }
-}
-
-static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
-{
- u32 flags =
- I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
-
- switch (flags) {
- case GEN6_PCODE_SUCCESS:
- return 0;
- case GEN6_PCODE_ILLEGAL_CMD:
- return -ENXIO;
- case GEN7_PCODE_TIMEOUT:
- return -ETIMEDOUT;
- case GEN7_PCODE_ILLEGAL_DATA:
- return -EINVAL;
- case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- return -EOVERFLOW;
- default:
- MISSING_CASE(flags);
- return 0;
- }
-}
-
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
-{
- int status;
-
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
- /* GEN6_PCODE_* are outside of the forcewake domain, we can
- * use te fw I915_READ variants to reduce the amount of work
- * required when reading/writing.
- */
-
- if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
- DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps\n",
- mbox, __builtin_return_address(0));
- return -EAGAIN;
- }
-
- I915_WRITE_FW(GEN6_PCODE_DATA, *val);
- I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
- I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
- if (__intel_wait_for_register_fw(&dev_priv->uncore,
- GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- 500, 0, NULL)) {
- DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
- mbox, __builtin_return_address(0));
- return -ETIMEDOUT;
- }
-
- *val = I915_READ_FW(GEN6_PCODE_DATA);
- I915_WRITE_FW(GEN6_PCODE_DATA, 0);
-
- if (INTEL_GEN(dev_priv) > 6)
- status = gen7_check_mailbox_status(dev_priv);
- else
- status = gen6_check_mailbox_status(dev_priv);
-
- if (status) {
- DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
- mbox, __builtin_return_address(0), status);
- return status;
- }
-
- return 0;
-}
-
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
- u32 mbox, u32 val,
- int fast_timeout_us, int slow_timeout_ms)
-{
- int status;
-
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
- /* GEN6_PCODE_* are outside of the forcewake domain, we can
- * use te fw I915_READ variants to reduce the amount of work
- * required when reading/writing.
- */
-
- if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
- DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps\n",
- val, mbox, __builtin_return_address(0));
- return -EAGAIN;
- }
-
- I915_WRITE_FW(GEN6_PCODE_DATA, val);
- I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
- I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
- if (__intel_wait_for_register_fw(&dev_priv->uncore,
- GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- fast_timeout_us, slow_timeout_ms,
- NULL)) {
- DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
- val, mbox, __builtin_return_address(0));
- return -ETIMEDOUT;
- }
-
- I915_WRITE_FW(GEN6_PCODE_DATA, 0);
-
- if (INTEL_GEN(dev_priv) > 6)
- status = gen7_check_mailbox_status(dev_priv);
- else
- status = gen6_check_mailbox_status(dev_priv);
-
- if (status) {
- DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
- val, mbox, __builtin_return_address(0), status);
- return status;
- }
-
- return 0;
-}
-
-static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
- u32 request, u32 reply_mask, u32 reply,
- u32 *status)
-{
- u32 val = request;
-
- *status = sandybridge_pcode_read(dev_priv, mbox, &val);
-
- return *status || ((val & reply_mask) == reply);
-}
-
-/**
- * skl_pcode_request - send PCODE request until acknowledgment
- * @dev_priv: device private
- * @mbox: PCODE mailbox ID the request is targeted for
- * @request: request ID
- * @reply_mask: mask used to check for request acknowledgment
- * @reply: value used to check for request acknowledgment
- * @timeout_base_ms: timeout for polling with preemption enabled
- *
- * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
- * The request is acknowledged once the PCODE reply dword equals @reply after
- * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 50 ms with
- * preemption disabled.
- *
- * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
- * other error as reported by PCODE.
- */
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms)
-{
- u32 status;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
-#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
- &status)
-
- /*
- * Prime the PCODE by doing a request first. Normally it guarantees
- * that a subsequent request, at most @timeout_base_ms later, succeeds.
- * _wait_for() doesn't guarantee when its passed condition is evaluated
- * first, so send the first request explicitly.
- */
- if (COND) {
- ret = 0;
- goto out;
- }
- ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
- if (!ret)
- goto out;
-
- /*
- * The above can time out if the number of requests was low (2 in the
- * worst case) _and_ PCODE was busy for some reason even after a
- * (queued) request and @timeout_base_ms delay. As a workaround retry
- * the poll with preemption disabled to maximize the number of
- * requests. Increase the timeout from @timeout_base_ms to 50ms to
- * account for interrupts that could reduce the number of these
- * requests, and for any quirks of the PCODE firmware that delays
- * the request completion.
- */
- DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
- WARN_ON_ONCE(timeout_base_ms > 3);
- preempt_disable();
- ret = wait_for_atomic(COND, 50);
- preempt_enable();
-
-out:
- return ret ? ret : status;
-#undef COND
-}
-
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -9978,7 +9785,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->pcu_lock);
+ mutex_init(&dev_priv->gt_pm.rps.lock);
mutex_init(&dev_priv->gt_pm.rps.power.mutex);
atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
@@ -10108,6 +9915,12 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
return mul_u64_u32_div(time_hw, mul, div);
}
+u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+ i915_reg_t reg)
+{
+ return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
+}
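
Because DIV_ROUND_UP_ULL rounds up, any non-zero residency reports as at least 1 us. A usage sketch; GEN6_GT_GFX_RC6 is the usual RC6 residency counter register:

    u64 us = intel_rc6_residency_us(dev_priv, GEN6_GT_GFX_RC6);
    u64 ms = DIV_ROUND_UP_ULL(us, 1000); /* e.g. for sysfs output */
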
+
u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
{
u32 cagf;
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 674a3f0f16a7..17339c99440c 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#include "i915_reg.h"
+
struct drm_atomic_state;
struct drm_device;
struct drm_i915_private;
@@ -68,4 +70,12 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
+int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
+u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, i915_reg_t reg);
+u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg);
+
+u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
+
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 963663ba0edf..01ca502099df 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -229,16 +229,6 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
-static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
-{
- u8 dprx = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
- &dprx) != 1)
- return false;
- return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
-}
-
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
u8 alpm_caps = 0;
@@ -352,7 +342,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct edp_vsc_psr psr_vsc;
+ struct dp_sdp psr_vsc;
if (dev_priv->psr.psr2_enabled) {
/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c
index ec2b0fc92b8b..0b749c28541f 100644
--- a/drivers/gpu/drm/i915/intel_quirks.c
+++ b/drivers/gpu/drm/i915/intel_quirks.c
@@ -6,6 +6,7 @@
#include <linux/dmi.h>
#include "intel_drv.h"
+#include "intel_quirks.h"
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
diff --git a/drivers/gpu/drm/i915/intel_quirks.h b/drivers/gpu/drm/i915/intel_quirks.h
new file mode 100644
index 000000000000..b0fcff142a56
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_quirks.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_QUIRKS_H__
+#define __INTEL_QUIRKS_H__
+
+struct drm_i915_private;
+
+void intel_init_quirks(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_QUIRKS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6150e35bf7b5..12f5b669f20e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -32,11 +32,16 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "i915_irq.h"
#include "intel_cdclk.h"
+#include "intel_combo_phy.h"
#include "intel_crt.h"
#include "intel_csr.h"
#include "intel_dp.h"
+#include "intel_dpio_phy.h"
#include "intel_drv.h"
+#include "intel_hotplug.h"
+#include "intel_sideband.h"
/**
* DOC: runtime pm
@@ -55,6 +60,22 @@
* present for a given platform.
*/
+static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915);
+static void
+__intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref,
+ bool wakelock);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+static void
+intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref);
+#else
+static inline void intel_runtime_pm_put_raw(struct drm_i915_private *i915,
+ intel_wakeref_t wref)
+{
+ __intel_runtime_pm_put(i915, -1, false);
+}
+#endif
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
#include <linux/sort.h>
@@ -94,9 +115,6 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
depot_stack_handle_t stack, *stacks;
unsigned long flags;
- atomic_inc(&rpm->wakeref_count);
- assert_rpm_wakelock_held(i915);
-
if (!HAS_RUNTIME_PM(i915))
return -1;
@@ -124,8 +142,8 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
return stack;
}
-static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
- depot_stack_handle_t stack)
+static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ depot_stack_handle_t stack)
{
struct i915_runtime_pm *rpm = &i915->runtime_pm;
unsigned long flags, n;
@@ -220,32 +238,60 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p,
}
static noinline void
-untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
+ struct intel_runtime_pm_debug *saved)
+{
+ *saved = *debug;
+
+ debug->owners = NULL;
+ debug->count = 0;
+ debug->last_release = __save_depot_stack();
+}
+
+static void
+dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
+{
+ struct drm_printer p;
+
+ if (!debug->count)
+ return;
+
+ p = drm_debug_printer("i915");
+ __print_intel_runtime_pm_wakeref(&p, debug);
+
+ kfree(debug->owners);
+}
+
+static noinline void
+__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
{
struct i915_runtime_pm *rpm = &i915->runtime_pm;
struct intel_runtime_pm_debug dbg = {};
- struct drm_printer p;
unsigned long flags;
- assert_rpm_wakelock_held(i915);
- if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
- &rpm->debug.lock,
- flags)) {
- dbg = rpm->debug;
+ if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
+ &rpm->debug.lock,
+ flags))
+ return;
- rpm->debug.owners = NULL;
- rpm->debug.count = 0;
- rpm->debug.last_release = __save_depot_stack();
+ __untrack_all_wakerefs(&rpm->debug, &dbg);
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
- }
- if (!dbg.count)
- return;
+ dump_and_free_wakeref_tracking(&dbg);
+}
- p = drm_debug_printer("i915");
- __print_intel_runtime_pm_wakeref(&p, &dbg);
+static noinline void
+untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ struct intel_runtime_pm_debug dbg = {};
+ unsigned long flags;
- kfree(dbg.owners);
+ spin_lock_irqsave(&rpm->debug.lock, flags);
+ __untrack_all_wakerefs(&rpm->debug, &dbg);
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+ dump_and_free_wakeref_tracking(&dbg);
}
void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
@@ -295,19 +341,56 @@ static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
- atomic_inc(&i915->runtime_pm.wakeref_count);
- assert_rpm_wakelock_held(i915);
return -1;
}
-static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ intel_wakeref_t wref)
+{
+}
+
+static void
+__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
{
- assert_rpm_wakelock_held(i915);
atomic_dec(&i915->runtime_pm.wakeref_count);
}
+static void
+untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915)
+{
+}
+
#endif
+static void
+intel_runtime_pm_acquire(struct drm_i915_private *i915, bool wakelock)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+
+ if (wakelock) {
+ atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
+ assert_rpm_wakelock_held(i915);
+ } else {
+ atomic_inc(&rpm->wakeref_count);
+ assert_rpm_raw_wakeref_held(i915);
+ }
+}
+
+static void
+intel_runtime_pm_release(struct drm_i915_private *i915, int wakelock)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+
+ if (wakelock) {
+ assert_rpm_wakelock_held(i915);
+ atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
+ } else {
+ assert_rpm_raw_wakeref_held(i915);
+ }
+
+ __intel_wakeref_dec_and_check_tracking(i915);
+}
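
Both reference flavours share one atomic: raw wakerefs count in the low bits, while wakelock refs also add INTEL_RPM_WAKELOCK_BIAS, so each class can be asserted independently. A sketch under that assumption (the bias value below is hypothetical, the real constant lives in i915_drv.h):

    #define INTEL_RPM_WAKELOCK_BIAS (1 << 14) /* hypothetical value */

    /* any reference at all (raw or wakelock) */
    bool raw_held = atomic_read(&rpm->wakeref_count) > 0;

    /* at least one wakelock reference */
    bool wakelock_held =
        atomic_read(&rpm->wakeref_count) >= INTEL_RPM_WAKELOCK_BIAS;
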
+
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
@@ -315,6 +398,8 @@ const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
switch (domain) {
+ case POWER_DOMAIN_DISPLAY_CORE:
+ return "DISPLAY_CORE";
case POWER_DOMAIN_PIPE_A:
return "PIPE_A";
case POWER_DOMAIN_PIPE_B:
@@ -375,8 +460,6 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "VGA";
case POWER_DOMAIN_AUDIO:
return "AUDIO";
- case POWER_DOMAIN_PLLS:
- return "PLLS";
case POWER_DOMAIN_AUX_A:
return "AUX_A";
case POWER_DOMAIN_AUX_B:
@@ -1125,7 +1208,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
* PHY's HW context for port B is lost after DC transitions,
* so we need to restore it manually.
*/
- icl_combo_phys_init(dev_priv);
+ intel_combo_phy_init(dev_priv);
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1200,7 +1283,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
PUNIT_PWRGT_PWR_GATE(pw_idx);
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
#define COND \
((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
@@ -1221,7 +1304,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
#undef COND
out:
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
}
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1248,7 +1331,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
mask = PUNIT_PWRGT_MASK(pw_idx);
ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
/*
@@ -1267,7 +1350,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
WARN_ON(ctrl != state);
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
return enabled;
}
@@ -1558,7 +1641,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
/* Enable dynamic power down */
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
@@ -1581,7 +1664,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
}
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
@@ -1644,9 +1727,9 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
else
reg = _CHV_CMN_DW6_CH1;
- mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_get(dev_priv);
val = vlv_dpio_read(dev_priv, pipe, reg);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_dpio_put(dev_priv);
/*
* This assumes !override is only used when the port is disabled.
@@ -1753,7 +1836,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
bool enabled;
u32 state, ctrl;
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
/*
@@ -1770,7 +1853,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
WARN_ON(ctrl << 16 != state);
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
return enabled;
}
@@ -1785,7 +1868,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
#define COND \
((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
@@ -1806,7 +1889,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
#undef COND
out:
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1825,6 +1908,125 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
chv_set_pipe_power_well(dev_priv, power_well, false);
}
+static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
+{
+ return power_domains->async_put_domains[0] |
+ power_domains->async_put_domains[1];
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static bool
+assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
+{
+ return !WARN_ON(power_domains->async_put_domains[0] &
+ power_domains->async_put_domains[1]);
+}
+
+static bool
+__async_put_domains_state_ok(struct i915_power_domains *power_domains)
+{
+ enum intel_display_power_domain domain;
+ bool err = false;
+
+ err |= !assert_async_put_domain_masks_disjoint(power_domains);
+ err |= WARN_ON(!!power_domains->async_put_wakeref !=
+ !!__async_put_domains_mask(power_domains));
+
+ for_each_power_domain(domain, __async_put_domains_mask(power_domains))
+ err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
+
+ return !err;
+}
+
+static void print_power_domains(struct i915_power_domains *power_domains,
+ const char *prefix, u64 mask)
+{
+ enum intel_display_power_domain domain;
+
+ DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
+ for_each_power_domain(domain, mask)
+ DRM_DEBUG_DRIVER("%s use_count %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
+}
+
+static void
+print_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+ DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
+ power_domains->async_put_wakeref);
+
+ print_power_domains(power_domains, "async_put_domains[0]",
+ power_domains->async_put_domains[0]);
+ print_power_domains(power_domains, "async_put_domains[1]",
+ power_domains->async_put_domains[1]);
+}
+
+static void
+verify_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+ if (!__async_put_domains_state_ok(power_domains))
+ print_async_put_domains_state(power_domains);
+}
+
+#else
+
+static void
+assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
+{
+}
+
+static void
+verify_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+}
+
+#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
+
+static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
+{
+ assert_async_put_domain_masks_disjoint(power_domains);
+
+ return __async_put_domains_mask(power_domains);
+}
+
+static void
+async_put_domains_clear_domain(struct i915_power_domains *power_domains,
+ enum intel_display_power_domain domain)
+{
+ assert_async_put_domain_masks_disjoint(power_domains);
+
+ power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
+ power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
+}
+
+static bool
+intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool ret = false;
+
+ if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
+ goto out_verify;
+
+ async_put_domains_clear_domain(power_domains, domain);
+
+ ret = true;
+
+ if (async_put_domains_mask(power_domains))
+ goto out_verify;
+
+ cancel_delayed_work(&power_domains->async_put_work);
+ intel_runtime_pm_put_raw(dev_priv,
+ fetch_and_zero(&power_domains->async_put_wakeref));
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ return ret;
+}
+
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
@@ -1832,6 +2034,9 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
+ if (intel_display_power_grab_async_put_ref(dev_priv, domain))
+ return;
+
for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
intel_power_well_get(dev_priv, power_well);
@@ -1857,9 +2062,7 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&power_domains->lock);
-
__intel_display_power_get_domain(dev_priv, domain);
-
mutex_unlock(&power_domains->lock);
return wakeref;
@@ -1908,35 +2111,51 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
return wakeref;
}
-static void __intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+static void
+__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
+ const char *name = intel_display_power_domain_str(domain);
power_domains = &dev_priv->power_domains;
- mutex_lock(&power_domains->lock);
-
WARN(!power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
- intel_display_power_domain_str(domain));
+ name);
+ WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
+ "Async disabling of domain %s is pending\n",
+ name);
+
power_domains->domain_use_count[domain]--;
for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
intel_power_well_put(dev_priv, power_well);
+}
+static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ __intel_display_power_put_domain(dev_priv, domain);
mutex_unlock(&power_domains->lock);
}
/**
- * intel_display_power_put - release a power domain reference
+ * intel_display_power_put_unchecked - release an unchecked power domain reference
* @dev_priv: i915 device instance
* @domain: power domain to reference
*
* This function drops the power domain reference obtained by
* intel_display_power_get() and might power down the corresponding hardware
* block right away if this is the last reference.
+ *
+ * This function exists only for historical reasons and should be avoided in
+ * new code, as the correctness of its use cannot be checked. Always use
+ * intel_display_power_put() instead.
*/
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
@@ -1945,7 +2164,199 @@ void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
intel_runtime_pm_put_unchecked(dev_priv);
}
+static void
+queue_async_put_domains_work(struct i915_power_domains *power_domains,
+ intel_wakeref_t wakeref)
+{
+ WARN_ON(power_domains->async_put_wakeref);
+ power_domains->async_put_wakeref = wakeref;
+ WARN_ON(!queue_delayed_work(system_unbound_wq,
+ &power_domains->async_put_work,
+ msecs_to_jiffies(100)));
+}
+
+static void
+release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(power_domains, struct drm_i915_private,
+ power_domains);
+ enum intel_display_power_domain domain;
+ intel_wakeref_t wakeref;
+
+ /*
+ * The caller must already hold a raw wakeref; upgrade it to a proper
+ * wakeref to make the state checker happy about the HW access during
+ * power well disabling.
+ */
+ assert_rpm_raw_wakeref_held(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
+
+ for_each_power_domain(domain, mask) {
+ /* Clear before put, so put's sanity check is happy. */
+ async_put_domains_clear_domain(power_domains, domain);
+ __intel_display_power_put_domain(dev_priv, domain);
+ }
+
+ intel_runtime_pm_put(dev_priv, wakeref);
+}
+
+static void
+intel_display_power_put_async_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ power_domains.async_put_work.work);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv);
+ intel_wakeref_t old_work_wakeref = 0;
+
+ mutex_lock(&power_domains->lock);
+
+ /*
+ * Bail out if all the domain refs pending to be released were grabbed
+ * by subsequent gets or a flush_work.
+ */
+ old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
+ if (!old_work_wakeref)
+ goto out_verify;
+
+ release_async_put_domains(power_domains,
+ power_domains->async_put_domains[0]);
+
+ /* Requeue the work if more domains were async put meanwhile. */
+ if (power_domains->async_put_domains[1]) {
+ power_domains->async_put_domains[0] =
+ fetch_and_zero(&power_domains->async_put_domains[1]);
+ queue_async_put_domains_work(power_domains,
+ fetch_and_zero(&new_work_wakeref));
+ }
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (old_work_wakeref)
+ intel_runtime_pm_put_raw(dev_priv, old_work_wakeref);
+ if (new_work_wakeref)
+ intel_runtime_pm_put_raw(dev_priv, new_work_wakeref);
+}
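
The two mask slots give a simple handoff: puts that race with an already-queued work land in slot [1], and the handler promotes them to slot [0] and requeues using its own raw wakeref. In sequence form (an illustration, not code from the patch):

    /*
     *   put_async(A): async_put_domains[0] |= A, queue work with wakeref
     *   put_async(B): async_put_domains[1] |= B  (work already pending)
     *   work:         release slot [0]; slot [0] = slot [1]; slot [1] = 0;
     *                 requeue, handing over the work's raw wakeref
     */
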
+
+/**
+ * intel_display_power_put_async - release a power domain reference asynchronously
+ * @i915: i915 device instance
+ * @domain: power domain to reference
+ * @wakeref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get*() and schedules a work to power down the
+ * corresponding hardware block if this is the last reference.
+ */
+void __intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915);
+
+ mutex_lock(&power_domains->lock);
+
+ if (power_domains->domain_use_count[domain] > 1) {
+ __intel_display_power_put_domain(i915, domain);
+
+ goto out_verify;
+ }
+
+ WARN_ON(power_domains->domain_use_count[domain] != 1);
+
+ /* Let a pending work requeue itself or queue a new one. */
+ if (power_domains->async_put_wakeref) {
+ power_domains->async_put_domains[1] |= BIT_ULL(domain);
+ } else {
+ power_domains->async_put_domains[0] |= BIT_ULL(domain);
+ queue_async_put_domains_work(power_domains,
+ fetch_and_zero(&work_wakeref));
+ }
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (work_wakeref)
+ intel_runtime_pm_put_raw(i915, work_wakeref);
+
+ intel_runtime_pm_put(i915, wakeref);
+}
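
Callers are expected to go through the intel_display_power_put_async() wrapper around this helper; a minimal usage sketch, with POWER_DOMAIN_AUX_A standing in for any domain:

    intel_wakeref_t wakeref;

    wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
    /* ... AUX transfer ... */
    intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
    /* the well stays up ~100 ms; a re-get within that window is cheap */
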
+
+/**
+ * intel_display_power_flush_work - flushes the async display power disabling work
+ * @i915: i915 device instance
+ *
+ * Flushes any pending work that was scheduled by a preceding
+ * intel_display_power_put_async() call, completing the disabling of the
+ * corresponding power domains.
+ *
+ * Note that the work handler function may still be running after this
+ * function returns; to ensure that the work handler isn't running use
+ * intel_display_power_flush_work_sync() instead.
+ */
+void intel_display_power_flush_work(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ intel_wakeref_t work_wakeref;
+
+ mutex_lock(&power_domains->lock);
+
+ work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
+ if (!work_wakeref)
+ goto out_verify;
+
+ release_async_put_domains(power_domains,
+ async_put_domains_mask(power_domains));
+ cancel_delayed_work(&power_domains->async_put_work);
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (work_wakeref)
+ intel_runtime_pm_put_raw(i915, work_wakeref);
+}
+
+/**
+ * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
+ * @i915: i915 device instance
+ *
+ * Like intel_display_power_flush_work(), but also ensure that the work
+ * handler function is not running any more when this function returns.
+ */
+static void
+intel_display_power_flush_work_sync(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+
+ intel_display_power_flush_work(i915);
+ cancel_delayed_work_sync(&power_domains->async_put_work);
+
+ verify_async_put_domains_state(power_domains);
+
+ WARN_ON(power_domains->async_put_wakeref);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ * @wakeref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
@@ -1965,6 +2376,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
@@ -2011,6 +2423,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define CHV_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_C) | \
@@ -3433,6 +3846,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
mutex_init(&power_domains->lock);
+ INIT_DELAYED_WORK(&power_domains->async_put_work,
+ intel_display_power_put_async_work);
+
/*
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
@@ -3609,6 +4025,246 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
I915_WRITE(MBUS_ABOX_CTL, val);
}
+static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
+{
+ u32 val = I915_READ(LCPLL_CTL);
+
+ /*
+ * The LCPLL register should be turned on by the BIOS. For now
+ * let's just check its state and print errors in case
+ * something is wrong. Don't even try to turn it on.
+ */
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+}
+
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(dev, crtc)
+ I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
+ pipe_name(crtc->pipe));
+
+ I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
+ "Display power well on\n");
+ I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
+ "SPLL enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
+ "WRPLL1 enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
+ "WRPLL2 enabled\n");
+ I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
+ "Panel power on\n");
+ I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ "CPU PWM1 enabled\n");
+ if (IS_HASWELL(dev_priv))
+ I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
+ I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ "PCH PWM1 enabled\n");
+ I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Utility pin enabled\n");
+ I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
+ "PCH GTC enabled\n");
+
+ /*
+ * In theory we can still leave IRQs enabled, as long as only the HPD
+ * interrupts remain enabled. We used to check for that, but since it's
+ * gen-specific and since we only disable LCPLL after we fully disable
+ * the interrupts, the check below should be enough.
+ */
+ I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
+static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+ if (IS_HASWELL(dev_priv))
+ return I915_READ(D_COMP_HSW);
+ else
+ return I915_READ(D_COMP_BDW);
+}
+
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
+{
+ if (IS_HASWELL(dev_priv)) {
+ if (sandybridge_pcode_write(dev_priv,
+ GEN6_PCODE_WRITE_D_COMP, val))
+ DRM_DEBUG_KMS("Failed to write to D_COMP\n");
+ } else {
+ I915_WRITE(D_COMP_BDW, val);
+ POSTING_READ(D_COMP_BDW);
+ }
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down)
+{
+ u32 val;
+
+ assert_can_disable_lcpll(dev_priv);
+
+ val = I915_READ(LCPLL_CTL);
+
+ if (switch_to_fclk) {
+ val |= LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ DRM_ERROR("Switching to FCLK failed\n");
+
+ val = I915_READ(LCPLL_CTL);
+ }
+
+ val |= LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+
+ if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
+ LCPLL_PLL_LOCK, 0, 1))
+ DRM_ERROR("LCPLL still locked\n");
+
+ val = hsw_read_dcomp(dev_priv);
+ val |= D_COMP_COMP_DISABLE;
+ hsw_write_dcomp(dev_priv, val);
+ ndelay(100);
+
+ if (wait_for((hsw_read_dcomp(dev_priv) &
+ D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+ if (allow_power_down) {
+ val = I915_READ(LCPLL_CTL);
+ val |= LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(LCPLL_CTL);
+
+ if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+ LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+ return;
+
+ /*
+ * Make sure we're not in PC8 state before disabling PC8, otherwise
+ * we'll hang the machine. To prevent PC8 state, just enable forcewake.
+ */
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ if (val & LCPLL_POWER_DOWN_ALLOW) {
+ val &= ~LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+
+ val = hsw_read_dcomp(dev_priv);
+ val |= D_COMP_COMP_FORCE;
+ val &= ~D_COMP_COMP_DISABLE;
+ hsw_write_dcomp(dev_priv, val);
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
+ LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
+ DRM_ERROR("LCPLL not locked yet\n");
+
+ if (val & LCPLL_CD_SOURCE_FCLK) {
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ DRM_ERROR("Switching back to LCPLL failed\n");
+ }
+
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+}
+
+/*
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states. Our driver only allows PC8+ when going into runtime PM.
+ *
+ * The requirements for PC8+ are that all the outputs are disabled, the power
+ * well is disabled and most interrupts are disabled, and these are also
+ * requirements for runtime PM. When these conditions are met, we manually
+ * handle the remaining steps: disable the interrupts and clocks, and switch
+ * the LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug
+ * interrupt, we can hard hang the machine.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6. Notice that this happens even
+ * if we don't put the device in PCI D3 state (which is what currently happens
+ * because of the runtime PM support).
+ *
+ * For more, read "Display Sequences for Package C8" on the hardware
+ * documentation.
+ */
+void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("Enabling package C8+\n");
+
+ if (HAS_PCH_LPT_LP(dev_priv)) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+
+ lpt_disable_clkout_dp(dev_priv);
+ hsw_disable_lcpll(dev_priv, true, true);
+}
+
+void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("Disabling package C8+\n");
+
+ hsw_restore_lcpll(dev_priv);
+ intel_init_pch_refclk(dev_priv);
+
+ if (HAS_PCH_LPT_LP(dev_priv)) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+}
+
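
/*
 * Illustrative sketch, not part of the patch: the two entry points above are
 * paired by the runtime-PM path on HSW/BDW. The wrapper names here are
 * hypothetical; the real callers are the driver's runtime suspend/resume
 * hooks, which must have disabled outputs and interrupts beforehand.
 */
static void example_hsw_runtime_suspend(struct drm_i915_private *i915)
{
	hsw_enable_pc8(i915);	/* switch CDCLK to Fclk, disable LCPLL */
}

static void example_hsw_runtime_resume(struct drm_i915_private *i915)
{
	hsw_disable_pc8(i915);	/* restore LCPLL, reinit PCH refclk */
}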
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
bool enable)
{
@@ -3764,7 +4420,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
/* 2-3. */
- cnl_combo_phys_init(dev_priv);
+ intel_combo_phy_init(dev_priv);
/*
* 4. Enable Power Well 1 (PG1).
@@ -3813,7 +4469,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
/* 5. */
- cnl_combo_phys_uninit(dev_priv);
+ intel_combo_phy_uninit(dev_priv);
}
void icl_display_core_init(struct drm_i915_private *dev_priv,
@@ -3827,11 +4483,11 @@ void icl_display_core_init(struct drm_i915_private *dev_priv,
/* 1. Enable PCH reset handshake. */
intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
- /* 2-3. */
- icl_combo_phys_init(dev_priv);
+ /* 2. Initialize all combo phys */
+ intel_combo_phy_init(dev_priv);
/*
- * 4. Enable Power Well 1 (PG1).
+ * 3. Enable Power Well 1 (PG1).
* The AUX IO power wells will be enabled on demand.
*/
mutex_lock(&power_domains->lock);
@@ -3839,13 +4495,13 @@ void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
- /* 5. Enable CDCLK. */
+ /* 4. Enable CDCLK. */
intel_cdclk_init(dev_priv);
- /* 6. Enable DBUF. */
+ /* 5. Enable DBUF. */
icl_dbuf_enable(dev_priv);
- /* 7. Setup MBUS. */
+ /* 6. Setup MBUS. */
icl_mbus_init(dev_priv);
if (resume && dev_priv->csr.dmc_payload)
@@ -3878,7 +4534,7 @@ void icl_display_core_uninit(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
/* 5. */
- icl_combo_phys_uninit(dev_priv);
+ intel_combo_phy_uninit(dev_priv);
}
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
@@ -4000,9 +4656,9 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0
{
bool ret;
- mutex_lock(&dev_priv->pcu_lock);
+ vlv_punit_get(dev_priv);
ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
- mutex_unlock(&dev_priv->pcu_lock);
+ vlv_punit_put(dev_priv);
return ret;
}
@@ -4069,7 +4725,10 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
mutex_unlock(&power_domains->lock);
assert_ved_power_gated(i915);
assert_isp_power_gated(i915);
- } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
+ } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
+ hsw_assert_cdclk(i915);
+ intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ } else if (IS_IVYBRIDGE(i915)) {
intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
}
@@ -4110,6 +4769,8 @@ void intel_power_domains_fini_hw(struct drm_i915_private *i915)
if (!i915_modparams.disable_power_well)
intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_display_power_flush_work_sync(i915);
+
intel_power_domains_verify_state(i915);
/* Keep the power well enabled, but cancel its rpm wakeref. */
@@ -4185,6 +4846,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
i915->csr.dmc_payload) {
+ intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
return;
}
@@ -4193,10 +4855,11 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* Even if power well support was disabled we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
- if (!i915_modparams.disable_power_well) {
+ if (!i915_modparams.disable_power_well)
intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
- intel_power_domains_verify_state(i915);
- }
+
+ intel_display_power_flush_work(i915);
+ intel_power_domains_verify_state(i915);
if (INTEL_GEN(i915) >= 11)
icl_display_core_uninit(i915);
@@ -4274,6 +4937,8 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
mutex_lock(&power_domains->lock);
+ verify_async_put_domains_state(power_domains);
+
dump_domain_info = false;
for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
@@ -4320,6 +4985,26 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
#endif
+static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915,
+ bool wakelock)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct device *kdev = &pdev->dev;
+ int ret;
+
+ ret = pm_runtime_get_sync(kdev);
+ WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
+
+ intel_runtime_pm_acquire(i915, wakelock);
+
+ return track_intel_runtime_pm_wakeref(i915);
+}
+
+static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
+{
+ return __intel_runtime_pm_get(i915, false);
+}
+
/**
* intel_runtime_pm_get - grab a runtime pm reference
* @i915: i915 device instance
@@ -4334,14 +5019,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
*/
intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
- int ret;
-
- ret = pm_runtime_get_sync(kdev);
- WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
-
- return track_intel_runtime_pm_wakeref(i915);
+ return __intel_runtime_pm_get(i915, true);
}
/**
@@ -4374,6 +5052,8 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
return 0;
}
+ intel_runtime_pm_acquire(i915, true);
+
return track_intel_runtime_pm_wakeref(i915);
}
@@ -4404,33 +5084,64 @@ intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
assert_rpm_wakelock_held(i915);
pm_runtime_get_noresume(kdev);
+ intel_runtime_pm_acquire(i915, true);
+
return track_intel_runtime_pm_wakeref(i915);
}
+static void __intel_runtime_pm_put(struct drm_i915_private *i915,
+ intel_wakeref_t wref,
+ bool wakelock)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct device *kdev = &pdev->dev;
+
+ untrack_intel_runtime_pm_wakeref(i915, wref);
+
+ intel_runtime_pm_release(i915, wakelock);
+
+ pm_runtime_mark_last_busy(kdev);
+ pm_runtime_put_autosuspend(kdev);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+static void
+intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
+{
+ __intel_runtime_pm_put(i915, wref, false);
+}
+#endif
+
/**
- * intel_runtime_pm_put - release a runtime pm reference
+ * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
* @i915: i915 device instance
*
* This function drops the device-level runtime pm reference obtained by
* intel_runtime_pm_get() and might power down the corresponding
* hardware block right away if this is the last reference.
+ *
+ * This function exists only for historical reasons and should be avoided in
+ * new code, as the correctness of its use cannot be checked. Always use
+ * intel_runtime_pm_put() instead.
*/
void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
-
- untrack_intel_runtime_pm_wakeref(i915);
-
- pm_runtime_mark_last_busy(kdev);
- pm_runtime_put_autosuspend(kdev);
+ __intel_runtime_pm_put(i915, -1, true);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+/**
+ * intel_runtime_pm_put - release a runtime pm reference
+ * @i915: i915 device instance
+ * @wref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the device-level runtime pm reference obtained by
+ * intel_runtime_pm_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
{
- cancel_intel_runtime_pm_wakeref(i915, wref);
- intel_runtime_pm_put_unchecked(i915);
+ __intel_runtime_pm_put(i915, wref, true);
}
#endif
@@ -4504,14 +5215,14 @@ void intel_runtime_pm_disable(struct drm_i915_private *i915)
void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
{
struct i915_runtime_pm *rpm = &i915->runtime_pm;
- int count;
+ int count = atomic_read(&rpm->wakeref_count);
- count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
WARN(count,
- "i915->runtime_pm.wakeref_count=%d on cleanup\n",
- count);
+ "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
+ intel_rpm_raw_wakeref_count(count),
+ intel_rpm_wakelock_count(count));
- untrack_intel_runtime_pm_wakeref(i915);
+ untrack_all_intel_runtime_pm_wakerefs(i915);
}
void intel_runtime_pm_init_early(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
new file mode 100644
index 000000000000..0a4c4b3aee7d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_RUNTIME_PM_H__
+#define __INTEL_RUNTIME_PM_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct drm_printer;
+struct intel_encoder;
+
+enum i915_drm_suspend_mode {
+ I915_DRM_SUSPEND_IDLE,
+ I915_DRM_SUSPEND_MEM,
+ I915_DRM_SUSPEND_HIBERNATE,
+};
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
+void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
+
+void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
+int intel_power_domains_init(struct drm_i915_private *);
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
+void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *dev_priv);
+void intel_power_domains_enable(struct drm_i915_private *dev_priv);
+void intel_power_domains_disable(struct drm_i915_private *dev_priv);
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+ enum i915_drm_suspend_mode);
+void intel_power_domains_resume(struct drm_i915_private *dev_priv);
+void hsw_enable_pc8(struct drm_i915_private *dev_priv);
+void hsw_disable_pc8(struct drm_i915_private *dev_priv);
+void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
+
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void __intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref);
+void intel_display_power_flush_work(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref);
+static inline void
+intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put_async(i915, domain, wakeref);
+}
+#else
+static inline void
+intel_display_power_put(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ intel_display_power_put_unchecked(i915, domain);
+}
+
+static inline void
+intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put_async(i915, domain, -1);
+}
+#endif
+
+#define with_intel_display_power(i915, domain, wf) \
+ for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices);
+
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
+
+#define with_intel_runtime_pm(i915, wf) \
+ for ((wf) = intel_runtime_pm_get(i915); (wf); \
+ intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+
+#define with_intel_runtime_pm_if_in_use(i915, wf) \
+ for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
+ intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
+#else
+static inline void
+intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+{
+ intel_runtime_pm_put_unchecked(i915);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ struct drm_printer *p);
+#else
+static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ struct drm_printer *p)
+{
+}
+#endif
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override);
+
+#endif /* __INTEL_RUNTIME_PM_H__ */
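
/*
 * Illustrative sketch, not part of the header: scoped wakeref handling with
 * the with_intel_runtime_pm() helper, which issues the matching
 * intel_runtime_pm_put() when the block is left. The register read is just a
 * stand-in for any hardware access; the function name is hypothetical.
 */
static u32 example_read_under_rpm(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	u32 val = 0;

	with_intel_runtime_pm(i915, wakeref)
		val = intel_uncore_read(&i915->uncore, GEN6_RP_STATE_CAP);

	return val;
}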
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9ecfba0a54a1..ed0485a44c3e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -37,9 +37,13 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
#include "intel_hdmi.h"
+#include "intel_hotplug.h"
#include "intel_panel.h"
#include "intel_sdvo.h"
#include "intel_sdvo_regs.h"
@@ -2388,9 +2392,10 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
};
static int intel_sdvo_atomic_check(struct drm_connector *conn,
- struct drm_connector_state *new_conn_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = new_conn_state->state;
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, conn);
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(state, conn);
struct intel_sdvo_connector_state *old_state =
@@ -2402,13 +2407,13 @@ static int intel_sdvo_atomic_check(struct drm_connector *conn,
(memcmp(&old_state->tv, &new_state->tv, sizeof(old_state->tv)) ||
memcmp(&old_conn_state->tv, &new_conn_state->tv, sizeof(old_conn_state->tv)))) {
struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(new_conn_state->state,
+ drm_atomic_get_new_crtc_state(state,
new_conn_state->crtc);
crtc_state->connectors_changed = true;
}
- return intel_digital_connector_atomic_check(conn, new_conn_state);
+ return intel_digital_connector_atomic_check(conn, state);
}
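
/*
 * Illustrative sketch, not part of the patch: the updated ->atomic_check()
 * prototype receives the full atomic state, and the connector states are
 * looked up from it explicitly. The function name is hypothetical.
 */
static int example_connector_atomic_check(struct drm_connector *connector,
					  struct drm_atomic_state *state)
{
	struct drm_connector_state *new_conn_state =
		drm_atomic_get_new_connector_state(state, connector);

	if (!new_conn_state->crtc)
		return 0;

	/* ... compare against the old state, flag CRTC changes as needed ... */

	return 0;
}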
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 57de41b1f989..87b5a14c7ca8 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -22,6 +22,10 @@
*
*/
+#include <asm/iosf_mbi.h>
+
+#include "intel_sideband.h"
+
#include "i915_drv.h"
#include "intel_drv.h"
@@ -39,19 +43,68 @@
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP 0x07
-static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
- u32 port, u32 opcode, u32 addr, u32 *val)
+static void ping(void *info)
+{
+}
+
+static void __vlv_punit_get(struct drm_i915_private *i915)
+{
+ iosf_mbi_punit_acquire();
+
+ /*
+ * Prevent the cpu from sleeping while we use this sideband, otherwise
+ * the punit may cause a machine hang. The issue appears to be associated
+ * with changing the power state of the CPU package while changing
+ * the power state via the punit, and we have only observed it
+ * reliably on 4-core Baytrail systems, suggesting the issue is in the
+ * power delivery mechanism and likely to be board/function
+ * specific. Hence we presume the workaround need only be applied
+ * to the Valleyview P-unit and not all sideband communications.
+ */
+ if (IS_VALLEYVIEW(i915)) {
+ pm_qos_update_request(&i915->sb_qos, 0);
+ on_each_cpu(ping, NULL, 1);
+ }
+}
+
+static void __vlv_punit_put(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915))
+ pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE);
+
+ iosf_mbi_punit_release();
+}
+
+void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
{
- u32 cmd, be = 0xf, bar = 0;
- bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
+ if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ __vlv_punit_get(i915);
- cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
- (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
- (bar << IOSF_BAR_SHIFT);
+ mutex_lock(&i915->sb_lock);
+}
+
+void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
+{
+ mutex_unlock(&i915->sb_lock);
+
+ if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ __vlv_punit_put(i915);
+}
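
/*
 * Illustrative sketch, not part of the patch: the bitmask interface lets a
 * caller take several sideband ports in one critical section, e.g. code
 * touching both the CCK and the punit. The function name is hypothetical.
 */
static void example_multi_port(struct drm_i915_private *i915)
{
	vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));

	/* ... vlv_cck_read()/vlv_punit_write() calls under a single lock ... */

	vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
}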
+
+static int vlv_sideband_rw(struct drm_i915_private *i915,
+ u32 devfn, u32 port, u32 opcode,
+ u32 addr, u32 *val)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
+ int err;
- WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+ lockdep_assert_held(&i915->sb_lock);
+ if (port == IOSF_PORT_PUNIT)
+ iosf_mbi_assert_punit_acquired();
- if (intel_wait_for_register(&dev_priv->uncore,
+ /* Flush the previous comms, in case the last transaction failed. */
+ if (intel_wait_for_register(uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
@@ -59,131 +112,132 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
return -EAGAIN;
}
- I915_WRITE(VLV_IOSF_ADDR, addr);
- I915_WRITE(VLV_IOSF_DATA, is_read ? 0 : *val);
- I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
-
- if (intel_wait_for_register(&dev_priv->uncore,
- VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
- 5)) {
+ preempt_disable();
+
+ intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
+ intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
+ intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
+ (devfn << IOSF_DEVFN_SHIFT) |
+ (opcode << IOSF_OPCODE_SHIFT) |
+ (port << IOSF_PORT_SHIFT) |
+ (0xf << IOSF_BYTE_ENABLES_SHIFT) |
+ (0 << IOSF_BAR_SHIFT) |
+ IOSF_SB_BUSY);
+
+ if (__intel_wait_for_register_fw(uncore,
+ VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+ 10000, 0, NULL) == 0) {
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
+ err = 0;
+ } else {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
is_read ? "read" : "write");
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
}
- if (is_read)
- *val = I915_READ(VLV_IOSF_DATA);
+ preempt_enable();
- return 0;
+ return err;
}
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
+u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
{
u32 val = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
- mutex_lock(&dev_priv->sb_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRRDDA_NP, addr, &val);
- mutex_unlock(&dev_priv->sb_lock);
return val;
}
-int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
+int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
{
- int err;
-
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
- mutex_lock(&dev_priv->sb_lock);
- err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRWRDA_NP, addr, &val);
- mutex_unlock(&dev_priv->sb_lock);
-
- return err;
+ return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ SB_CRWRDA_NP, addr, &val);
}
-u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRRDDA_NP, reg, &val);
return val;
}
-void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRWRDA_NP, reg, &val);
}
-u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
{
u32 val = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
- mutex_lock(&dev_priv->sb_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
SB_CRRDDA_NP, addr, &val);
- mutex_unlock(&dev_priv->sb_lock);
return val;
}
-u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg)
+u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
SB_CRRDDA_NP, reg, &val);
+
return val;
}
-void vlv_iosf_sb_write(struct drm_i915_private *dev_priv,
+void vlv_iosf_sb_write(struct drm_i915_private *i915,
u8 port, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
SB_CRWRDA_NP, reg, &val);
}
-u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRRDDA_NP, reg, &val);
+
return val;
}
-void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRWRDA_NP, reg, &val);
}
-u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRRDDA_NP, reg, &val);
+
return val;
}
-void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRWRDA_NP, reg, &val);
}
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
{
+ int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
u32 val = 0;
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
- SB_MRD_NP, reg, &val);
+ vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
/*
* FIXME: There might be some registers where all 1's is a valid value,
@@ -195,101 +249,283 @@ u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
return val;
}
-void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
+void vlv_dpio_write(struct drm_i915_private *i915,
+ enum pipe pipe, int reg, u32 val)
+{
+ int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
+
+ vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
+}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
+ reg, &val);
+ return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
- SB_MWR_NP, reg, &val);
+ vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
+ reg, &val);
}
/* SBI access */
-u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
- enum intel_sbi_destination destination)
+static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination,
+ u32 *val, bool is_read)
{
- u32 value = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 cmd;
+
+ lockdep_assert_held(&i915->sb_lock);
- if (intel_wait_for_register(&dev_priv->uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100)) {
+ if (intel_wait_for_register_fw(uncore,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
- return 0;
+ return -EBUSY;
}
- I915_WRITE(SBI_ADDR, (reg << 16));
- I915_WRITE(SBI_DATA, 0);
+ intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
+ intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
if (destination == SBI_ICLK)
- value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
else
- value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
- I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
-
- if (intel_wait_for_register(&dev_priv->uncore,
- SBI_CTL_STAT,
- SBI_BUSY,
- 0,
- 100)) {
+ cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ if (!is_read)
+ cmd |= BIT(8);
+ intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
+
+ if (__intel_wait_for_register_fw(uncore,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100, 100, &cmd)) {
DRM_ERROR("timeout waiting for SBI to complete read\n");
- return 0;
+ return -ETIMEDOUT;
}
- if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) {
+ if (cmd & SBI_RESPONSE_FAIL) {
DRM_ERROR("error during SBI read of reg %x\n", reg);
- return 0;
+ return -ENXIO;
}
- return I915_READ(SBI_DATA);
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, SBI_DATA);
+
+ return 0;
+}
+
+u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination)
+{
+ u32 result = 0;
+
+ intel_sbi_rw(i915, reg, destination, &result, true);
+
+ return result;
}
-void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
- u32 tmp;
+ intel_sbi_rw(i915, reg, destination, &value, false);
+}
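
/*
 * Illustrative sketch, not part of the patch: SBI accesses still require the
 * caller to hold sb_lock, which the new lockdep assertion enforces.
 * SBI_SSCCTL6 is a real LPT iCLK register; the wrapper is hypothetical.
 */
static u32 example_read_sscctl6(struct drm_i915_private *i915)
{
	u32 val;

	mutex_lock(&i915->sb_lock);
	val = intel_sbi_read(i915, SBI_SSCCTL6, SBI_ICLK);
	mutex_unlock(&i915->sb_lock);

	return val;
}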
- WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+static inline int gen6_check_mailbox_status(u32 mbox)
+{
+ switch (mbox & GEN6_PCODE_ERROR_MASK) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_UNIMPLEMENTED_CMD:
+ return -ENODEV;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ case GEN6_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ default:
+ MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+ return 0;
+ }
+}
- if (intel_wait_for_register(&dev_priv->uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100)) {
- DRM_ERROR("timeout waiting for SBI to become ready\n");
- return;
+static inline int gen7_check_mailbox_status(u32 mbox)
+{
+ switch (mbox & GEN6_PCODE_ERROR_MASK) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN7_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ case GEN7_PCODE_ILLEGAL_DATA:
+ return -EINVAL;
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ default:
+ MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+ return 0;
}
+}
- I915_WRITE(SBI_ADDR, (reg << 16));
- I915_WRITE(SBI_DATA, value);
+static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
+ u32 mbox, u32 *val,
+ int fast_timeout_us,
+ int slow_timeout_ms,
+ bool is_read)
+{
+ struct intel_uncore *uncore = &i915->uncore;
- if (destination == SBI_ICLK)
- tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+ lockdep_assert_held(&i915->sb_lock);
+
+ /*
+ * GEN6_PCODE_* are outside of the forcewake domain, we can
+ * use the fw I915_READ variants to reduce the amount of work
+ * required when reading/writing.
+ */
+
+ if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
+ return -EAGAIN;
+
+ intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
+ intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, 0);
+ intel_uncore_write_fw(uncore,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (__intel_wait_for_register_fw(uncore,
+ GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY, 0,
+ fast_timeout_us,
+ slow_timeout_ms,
+ &mbox))
+ return -ETIMEDOUT;
+
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
+
+ if (INTEL_GEN(i915) > 6)
+ return gen7_check_mailbox_status(mbox);
else
- tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
- I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
-
- if (intel_wait_for_register(&dev_priv->uncore,
- SBI_CTL_STAT,
- SBI_BUSY,
- 0,
- 100)) {
- DRM_ERROR("timeout waiting for SBI to complete write\n");
- return;
+ return gen6_check_mailbox_status(mbox);
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val)
+{
+ int err;
+
+ mutex_lock(&i915->sb_lock);
+ err = __sandybridge_pcode_rw(i915, mbox, val,
+ 500, 0,
+ true);
+ mutex_unlock(&i915->sb_lock);
+
+ if (err) {
+ DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+ mbox, __builtin_return_address(0), err);
}
- if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) {
- DRM_ERROR("error during SBI write of %x to reg %x\n",
- value, reg);
- return;
+ return err;
+}
+
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
+ u32 mbox, u32 val,
+ int fast_timeout_us,
+ int slow_timeout_ms)
+{
+ int err;
+
+ mutex_lock(&i915->sb_lock);
+ err = __sandybridge_pcode_rw(i915, mbox, &val,
+ fast_timeout_us, slow_timeout_ms,
+ false);
+ mutex_unlock(&i915->sb_lock);
+
+ if (err) {
+ DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+ val, mbox, __builtin_return_address(0), err);
}
+
+ return err;
}
-u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
+static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status)
{
- u32 val = 0;
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
- reg, &val);
- return val;
+ *status = __sandybridge_pcode_rw(i915, mbox, &request,
+ 500, 0,
+ true);
+
+ return *status || ((request & reply_mask) == reply);
}
-void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @i915: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms, and if this times out, for another 50 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
{
- vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
- reg, &val);
+ u32 status;
+ int ret;
+
+ mutex_lock(&i915->sb_lock);
+
+#define COND \
+ skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
+
+ /*
+ * Prime the PCODE by doing a request first. Normally it guarantees
+ * that a subsequent request, at most @timeout_base_ms later, succeeds.
+ * _wait_for() doesn't guarantee how soon the condition it is passed gets
+ * evaluated the first time, so send the first request explicitly.
+ */
+ if (COND) {
+ ret = 0;
+ goto out;
+ }
+ ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
+ if (!ret)
+ goto out;
+
+ /*
+ * The above can time out if the number of requests was low (2 in the
+ * worst case) _and_ PCODE was busy for some reason even after a
+ * (queued) request and @timeout_base_ms delay. As a workaround, retry
+ * the poll with preemption disabled to maximize the number of
+ * requests. Increase the timeout from @timeout_base_ms to 50ms to
+ * account for interrupts that could reduce the number of these
+ * requests, and for any quirks of the PCODE firmware that delay
+ * the request completion.
+ */
+ DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+ WARN_ON_ONCE(timeout_base_ms > 3);
+ preempt_disable();
+ ret = wait_for_atomic(COND, 50);
+ preempt_enable();
+
+out:
+ mutex_unlock(&i915->sb_lock);
+ return ret ? ret : status;
+#undef COND
}
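
/*
 * Illustrative sketch, not part of the patch, modelled on the CDCLK handshake
 * in intel_cdclk.c: poll PCODE for up to 3 ms (plus the 50 ms atomic
 * fallback) until it reports it is ready for a CDCLK change. The wrapper
 * name is hypothetical; the mailbox and ack values are the real ones.
 */
static int example_cdclk_handshake(struct drm_i915_private *i915)
{
	return skl_pcode_request(i915, SKL_PCODE_CDCLK_CONTROL,
				 SKL_CDCLK_PREPARE_FOR_CHANGE,
				 SKL_CDCLK_READY_FOR_CHANGE,
				 SKL_CDCLK_READY_FOR_CHANGE, 3);
}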
diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/intel_sideband.h
new file mode 100644
index 000000000000..a0907e2c4992
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sideband.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _INTEL_SIDEBAND_H_
+#define _INTEL_SIDEBAND_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+struct drm_i915_private;
+enum pipe;
+
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
+enum {
+ VLV_IOSF_SB_BUNIT,
+ VLV_IOSF_SB_CCK,
+ VLV_IOSF_SB_CCU,
+ VLV_IOSF_SB_DPIO,
+ VLV_IOSF_SB_FLISDSI,
+ VLV_IOSF_SB_GPIO,
+ VLV_IOSF_SB_NC,
+ VLV_IOSF_SB_PUNIT,
+};
+
+void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
+u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg);
+void vlv_iosf_sb_write(struct drm_i915_private *i915,
+ u8 port, u32 reg, u32 val);
+void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
+
+static inline void vlv_bunit_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_BUNIT));
+}
+
+u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg);
+void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val);
+
+static inline void vlv_bunit_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_BUNIT));
+}
+
+static inline void vlv_cck_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
+}
+
+u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg);
+void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val);
+
+static inline void vlv_cck_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
+}
+
+static inline void vlv_ccu_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCU));
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val);
+
+static inline void vlv_ccu_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCU));
+}
+
+static inline void vlv_dpio_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_DPIO));
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *i915,
+ enum pipe pipe, int reg, u32 val);
+
+static inline void vlv_dpio_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_DPIO));
+}
+
+static inline void vlv_flisdsi_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_FLISDSI));
+}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg);
+void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val);
+
+static inline void vlv_flisdsi_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_FLISDSI));
+}
+
+static inline void vlv_nc_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_NC));
+}
+
+u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr);
+
+static inline void vlv_nc_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_NC));
+}
+
+static inline void vlv_punit_get(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
+}
+
+u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr);
+int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val);
+
+static inline void vlv_punit_put(struct drm_i915_private *i915)
+{
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
+}
+
+u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination);
+void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
+ enum intel_sbi_destination destination);
+
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
+ u32 val, int fast_timeout_us,
+ int slow_timeout_ms);
+#define sandybridge_pcode_write(i915, mbox, val) \
+ sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0)
+
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
+#endif /* _INTEL_SIDEBAND_H_ */
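
/*
 * Illustrative sketch, not part of the header: a punit read under the new
 * get/put scheme that replaces the old pcu_lock. vlv_punit_get() acquires
 * the iosf_mbi side (plus the VLV CPU-sleep workaround) before taking
 * sb_lock. PUNIT_REG_GPU_FREQ_STS is a real VLV register; the wrapper is
 * hypothetical.
 */
static u32 example_punit_read(struct drm_i915_private *i915)
{
	u32 val;

	vlv_punit_get(i915);
	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	vlv_punit_put(i915);

	return val;
}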
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 2913e89280d7..c180815faabd 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -256,6 +256,16 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state)
unsigned int rotation = plane_state->base.rotation;
u32 stride, max_stride;
+ /*
+ * We ignore stride for all invisible planes that
+ * can be remapped. Otherwise we could end up
+ * with a false positive when the remapping didn't
+ * kick in due to the plane being invisible.
+ */
+ if (intel_plane_can_remap(plane_state) &&
+ !plane_state->base.visible)
+ return 0;
+
/* FIXME other color planes? */
stride = plane_state->color_plane[0].stride;
max_stride = plane->max_stride(plane, fb->format->format,
@@ -325,7 +335,8 @@ skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
{
- int cpp = drm_format_plane_cpp(pixel_format, 0);
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
/*
* "The stride in bytes must not exceed the
@@ -1417,6 +1428,10 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
if (!plane_state->base.visible)
return 0;
@@ -1428,10 +1443,6 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = i9xx_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
if (INTEL_GEN(dev_priv) >= 7)
plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
else
@@ -1475,6 +1486,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
if (!plane_state->base.visible)
return 0;
@@ -1482,10 +1497,6 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = i9xx_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
return 0;
@@ -1639,6 +1650,10 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = skl_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
if (!plane_state->base.visible)
return 0;
@@ -1654,10 +1669,6 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = skl_check_plane_surface(plane_state);
- if (ret)
- return ret;
-
/* HW only has 8 bits pixel precision, disable plane if invisible */
if (!(plane_state->base.alpha >> 8))
plane_state->base.visible = false;
diff --git a/drivers/gpu/drm/i915/intel_sprite.h b/drivers/gpu/drm/i915/intel_sprite.h
index 84be8686be16..500f6bffb139 100644
--- a/drivers/gpu/drm/i915/intel_sprite.h
+++ b/drivers/gpu/drm/i915/intel_sprite.h
@@ -43,13 +43,17 @@ static inline bool icl_is_nv12_y_plane(enum plane_id id)
return false;
}
+static inline u8 icl_hdr_plane_mask(void)
+{
+ return BIT(PLANE_PRIMARY) |
+ BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
+}
+
static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
enum plane_id plane_id)
{
- if (INTEL_GEN(dev_priv) < 11)
- return false;
-
- return plane_id < PLANE_SPRITE2;
+ return INTEL_GEN(dev_priv) >= 11 &&
+ icl_hdr_plane_mask() & BIT(plane_id);
}
#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 5dbba33f4202..0a95df6c6a57 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -38,6 +38,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
+#include "intel_hotplug.h"
#include "intel_tv.h"
enum tv_margin {
@@ -1820,16 +1821,18 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
};
static int intel_tv_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *new_state;
struct drm_crtc_state *new_crtc_state;
struct drm_connector_state *old_state;
+ new_state = drm_atomic_get_new_connector_state(state, connector);
if (!new_state->crtc)
return 0;
- old_state = drm_atomic_get_old_connector_state(new_state->state, connector);
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);
+ old_state = drm_atomic_get_old_connector_state(state, connector);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (old_state->tv.mode != new_state->tv.mode ||
old_state->tv.margins.left != new_state->tv.margins.left ||
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 25b80ffe71ad..63fc12cbc25d 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -22,11 +22,11 @@
*
*/
+#include "gt/intel_reset.h"
#include "intel_uc.h"
#include "intel_guc_submission.h"
#include "intel_guc.h"
#include "i915_drv.h"
-#include "i915_reset.h"
static void guc_free_load_err_log(struct intel_guc *guc);
@@ -224,6 +224,17 @@ static int guc_enable_communication(struct intel_guc *guc)
return 0;
}
+static void guc_stop_communication(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
+ if (HAS_GUC_CT(i915))
+ intel_guc_ct_stop(&guc->ct);
+
+ guc->send = intel_guc_send_nop;
+ guc->handler = intel_guc_to_host_event_handler_nop;
+}
+
static void guc_disable_communication(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_i915(guc);
@@ -280,6 +291,7 @@ void intel_uc_fini_misc(struct drm_i915_private *i915)
int intel_uc_init(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
+ struct intel_huc *huc = &i915->huc;
int ret;
if (!USES_GUC(i915))
@@ -292,19 +304,30 @@ int intel_uc_init(struct drm_i915_private *i915)
if (ret)
return ret;
+ if (USES_HUC(i915)) {
+ ret = intel_huc_init(huc);
+ if (ret)
+ goto err_guc;
+ }
+
if (USES_GUC_SUBMISSION(i915)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
*/
ret = intel_guc_submission_init(guc);
- if (ret) {
- intel_guc_fini(guc);
- return ret;
- }
+ if (ret)
+ goto err_huc;
}
return 0;
+
+err_huc:
+ if (USES_HUC(i915))
+ intel_huc_fini(huc);
+err_guc:
+ intel_guc_fini(guc);
+ return ret;
}
void intel_uc_fini(struct drm_i915_private *i915)
@@ -319,17 +342,17 @@ void intel_uc_fini(struct drm_i915_private *i915)
if (USES_GUC_SUBMISSION(i915))
intel_guc_submission_fini(guc);
+ if (USES_HUC(i915))
+ intel_huc_fini(&i915->huc);
+
intel_guc_fini(guc);
}
-void intel_uc_sanitize(struct drm_i915_private *i915)
+static void __uc_sanitize(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
struct intel_huc *huc = &i915->huc;
- if (!USES_GUC(i915))
- return;
-
GEM_BUG_ON(!HAS_GUC(i915));
intel_huc_sanitize(huc);
@@ -338,6 +361,14 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
__intel_uc_reset_hw(i915);
}
+void intel_uc_sanitize(struct drm_i915_private *i915)
+{
+ if (!USES_GUC(i915))
+ return;
+
+ __uc_sanitize(i915);
+}
+
int intel_uc_init_hw(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
@@ -423,6 +454,8 @@ err_communication:
err_log_capture:
guc_capture_load_err_log(guc);
err_out:
+ __uc_sanitize(i915);
+
/*
* Note that there is no fallback as either user explicitly asked for
* the GuC or driver default option was to run with the GuC enabled.
@@ -438,7 +471,7 @@ void intel_uc_fini_hw(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(i915))
+ if (!intel_guc_is_loaded(guc))
return;
GEM_BUG_ON(!HAS_GUC(i915));
@@ -447,6 +480,7 @@ void intel_uc_fini_hw(struct drm_i915_private *i915)
intel_guc_submission_disable(guc);
guc_disable_communication(guc);
+ __uc_sanitize(i915);
}
/**
@@ -459,33 +493,38 @@ void intel_uc_reset_prepare(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
- if (!USES_GUC(i915))
+ if (!intel_guc_is_loaded(guc))
return;
- guc_disable_communication(guc);
- intel_uc_sanitize(i915);
+ guc_stop_communication(guc);
+ __uc_sanitize(i915);
}
-int intel_uc_suspend(struct drm_i915_private *i915)
+void intel_uc_runtime_suspend(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
int err;
- if (!USES_GUC(i915))
- return 0;
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
+ if (!intel_guc_is_loaded(guc))
+ return;
err = intel_guc_suspend(guc);
- if (err) {
+ if (err)
DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
- return err;
- }
guc_disable_communication(guc);
+}
- return 0;
+void intel_uc_suspend(struct drm_i915_private *i915)
+{
+ struct intel_guc *guc = &i915->guc;
+ intel_wakeref_t wakeref;
+
+ if (!intel_guc_is_loaded(guc))
+ return;
+
+ with_intel_runtime_pm(i915, wakeref)
+ intel_uc_runtime_suspend(i915);
}
int intel_uc_resume(struct drm_i915_private *i915)
@@ -493,10 +532,7 @@ int intel_uc_resume(struct drm_i915_private *i915)
struct intel_guc *guc = &i915->guc;
int err;
- if (!USES_GUC(i915))
- return 0;
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ if (!intel_guc_is_loaded(guc))
return 0;
guc_enable_communication(guc);
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index c14729786652..3ea06c87dfcd 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -39,7 +39,8 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_uc_init(struct drm_i915_private *dev_priv);
void intel_uc_fini(struct drm_i915_private *dev_priv);
void intel_uc_reset_prepare(struct drm_i915_private *i915);
-int intel_uc_suspend(struct drm_i915_private *dev_priv);
+void intel_uc_suspend(struct drm_i915_private *i915);
+void intel_uc_runtime_suspend(struct drm_i915_private *i915);
int intel_uc_resume(struct drm_i915_private *dev_priv);
static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index becf05ebae4d..b9cb6fea9332 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -191,6 +191,35 @@ fail:
release_firmware(fw); /* OK even if fw is NULL */
}
+static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
+ struct i915_vma dummy = {
+ .node.start = intel_uc_fw_ggtt_offset(uc_fw),
+ .node.size = obj->base.size,
+ .pages = obj->mm.pages,
+ .vm = &ggtt->vm,
+ };
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
+
+ /* uc_fw->obj cache domains were not controlled across suspend */
+ drm_clflush_sg(dummy.pages);
+
+ ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
+}
+
+static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
+ u64 start = intel_uc_fw_ggtt_offset(uc_fw);
+
+ ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
+}
+
/**
* intel_uc_fw_upload - load uC firmware using custom loader
* @uc_fw: uC firmware
@@ -201,11 +230,8 @@ fail:
* Return: 0 on success, non-zero on failure.
*/
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
- int (*xfer)(struct intel_uc_fw *uc_fw,
- struct i915_vma *vma))
+ int (*xfer)(struct intel_uc_fw *uc_fw))
{
- struct i915_vma *vma;
- u32 ggtt_pin_bias;
int err;
DRM_DEBUG_DRIVER("%s fw load %s\n",
@@ -219,36 +245,15 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
intel_uc_fw_type_repr(uc_fw->type),
intel_uc_fw_status_repr(uc_fw->load_status));
- /* Pin object with firmware */
- err = i915_gem_object_set_to_gtt_domain(uc_fw->obj, false);
- if (err) {
- DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
- goto fail;
- }
-
- ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->ggtt.pin_bias;
- vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | ggtt_pin_bias);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
- goto fail;
- }
+ intel_uc_fw_ggtt_bind(uc_fw);
/* Call custom loader */
- err = xfer(uc_fw, vma);
-
- /*
- * We keep the object pages for reuse during resume. But we can unpin it
- * now that DMA has completed, so it doesn't continue to take up space.
- */
- i915_vma_unpin(vma);
-
+ err = xfer(uc_fw);
if (err)
goto fail;
+ intel_uc_fw_ggtt_unbind(uc_fw);
+
uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("%s fw load %s\n",
intel_uc_fw_type_repr(uc_fw->type),
@@ -273,14 +278,50 @@ fail:
return err;
}
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
+{
+ int err;
+
+ if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return -ENOEXEC;
+
+ err = i915_gem_object_pin_pages(uc_fw->obj);
+ if (err)
+ DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+
+ return err;
+}
+
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return;
+
+ i915_gem_object_unpin_pages(uc_fw->obj);
+}
+
+u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct drm_mm_node *node = &ggtt->uc_fw;
+
+ GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(upper_32_bits(node->start));
+ GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
+
+ return lower_32_bits(node->start);
+}
+
/**
- * intel_uc_fw_fini - cleanup uC firmware
+ * intel_uc_fw_cleanup_fetch - cleanup uC firmware
*
* @uc_fw: uC firmware
*
* Cleans up uC firmware by releasing the firmware GEM obj.
*/
-void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj;
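
Taken together, the new entry points split the firmware object's lifetime into pin, upload and unpin phases. A rough sketch of the intended call order, assuming a hypothetical xfer callback (my_xfer/my_dma_copy are illustrative names; the GGTT bind/unbind around the transfer is handled inside intel_uc_fw_upload()):

    static int my_xfer(struct intel_uc_fw *uc_fw)   /* hypothetical loader */
    {
        /* DMA from the fixed slot reserved at ggtt->uc_fw */
        return my_dma_copy(intel_uc_fw_ggtt_offset(uc_fw));
    }

    static int my_load(struct intel_uc_fw *uc_fw)
    {
        int err;

        err = intel_uc_fw_init(uc_fw);  /* pin the fetched pages */
        if (err)
            return err;

        /* binds at the fixed offset, runs my_xfer(), then unbinds */
        err = intel_uc_fw_upload(uc_fw, my_xfer);
        if (err)
            intel_uc_fw_fini(uc_fw);    /* unpin on failure */

        return err;
    }

On success the pages stay pinned until intel_uc_fw_fini(), so the upload can be repeated across suspend/resume without refetching the blob.
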
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 0e3bd580e267..ff98f8661d72 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -27,7 +27,6 @@
struct drm_printer;
struct drm_i915_private;
-struct i915_vma;
/* Home of GuC, HuC and DMC firmwares */
#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
@@ -102,7 +101,8 @@ static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
}
static inline
-void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
+void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_type type)
{
uc_fw->path = NULL;
uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
@@ -144,10 +144,12 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw);
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
- int (*xfer)(struct intel_uc_fw *uc_fw,
- struct i915_vma *vma));
+ int (*xfer)(struct intel_uc_fw *uc_fw));
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
+u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw);
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index d1d51e1121e2..f78668123f02 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -111,9 +111,11 @@ wait_ack_set(const struct intel_uncore_forcewake_domain *d,
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_ack_clear(d, FORCEWAKE_KERNEL))
+ if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
+ add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
+ }
}
enum ack_type {
@@ -186,9 +188,11 @@ fw_domain_get(const struct intel_uncore_forcewake_domain *d)
static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_ack_set(d, FORCEWAKE_KERNEL))
+ if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
+ add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
+ }
}
static inline void
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
index 3f9921ba4a76..ffec807b8960 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -7,8 +7,10 @@
*/
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_vdsc.h"
enum ROW_INDEX_BPP {
ROW_INDEX_6BPP = 0,
diff --git a/drivers/gpu/drm/i915/intel_vdsc.h b/drivers/gpu/drm/i915/intel_vdsc.h
new file mode 100644
index 000000000000..90d3f6017fcb
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vdsc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VDSC_H__
+#define __INTEL_VDSC_H__
+
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_dp;
+
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config);
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
new file mode 100644
index 000000000000..91196d9612bb
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -0,0 +1,75 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_drv.h"
+#include "intel_wakeref.h"
+
+static void rpm_get(struct drm_i915_private *i915, struct intel_wakeref *wf)
+{
+ wf->wakeref = intel_runtime_pm_get(i915);
+}
+
+static void rpm_put(struct drm_i915_private *i915, struct intel_wakeref *wf)
+{
+ intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
+
+ intel_runtime_pm_put(i915, wakeref);
+ GEM_BUG_ON(!wakeref);
+}
+
+int __intel_wakeref_get_first(struct drm_i915_private *i915,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf))
+{
+ /*
+ * Treat get/put as different subclasses, as we may need to run
+ * the put callback from under the shrinker and do not want to
+ * cross-contaminate that callback with any extra work performed
+ * upon acquiring the wakeref.
+ */
+ mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
+ if (!atomic_read(&wf->count)) {
+ int err;
+
+ rpm_get(i915, wf);
+
+ err = fn(wf);
+ if (unlikely(err)) {
+ rpm_put(i915, wf);
+ mutex_unlock(&wf->mutex);
+ return err;
+ }
+
+ smp_mb__before_atomic(); /* release wf->count */
+ }
+ atomic_inc(&wf->count);
+ mutex_unlock(&wf->mutex);
+
+ return 0;
+}
+
+int __intel_wakeref_put_last(struct drm_i915_private *i915,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf))
+{
+ int err;
+
+ err = fn(wf);
+ if (likely(!err))
+ rpm_put(i915, wf);
+ else
+ atomic_inc(&wf->count);
+ mutex_unlock(&wf->mutex);
+
+ return err;
+}
+
+void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
+{
+ __mutex_init(&wf->mutex, "wakeref", key);
+ atomic_set(&wf->count, 0);
+ wf->wakeref = 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
new file mode 100644
index 000000000000..db742291211c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -0,0 +1,133 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_WAKEREF_H
+#define INTEL_WAKEREF_H
+
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <linux/stackdepot.h>
+
+struct drm_i915_private;
+
+typedef depot_stack_handle_t intel_wakeref_t;
+
+struct intel_wakeref {
+ atomic_t count;
+ struct mutex mutex;
+ intel_wakeref_t wakeref;
+};
+
+void __intel_wakeref_init(struct intel_wakeref *wf,
+ struct lock_class_key *key);
+#define intel_wakeref_init(wf) do { \
+ static struct lock_class_key __key; \
+ \
+ __intel_wakeref_init((wf), &__key); \
+} while (0)
+
+int __intel_wakeref_get_first(struct drm_i915_private *i915,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf));
+int __intel_wakeref_put_last(struct drm_i915_private *i915,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf));
+
+/**
+ * intel_wakeref_get: Acquire the wakeref
+ * @i915: the drm_i915_private device
+ * @wf: the wakeref
+ * @fn: callback for acquiring the wakeref, called only on first acquire.
+ *
+ * Acquire a hold on the wakeref. The first user to do so, will acquire
+ * the runtime pm wakeref and then call the @fn underneath the wakeref
+ * mutex.
+ *
+ * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
+ * will be released and the acquisition unwound, and an error reported.
+ *
+ * Returns: 0 if the wakeref was acquired successfully, or a negative error
+ * code otherwise.
+ */
+static inline int
+intel_wakeref_get(struct drm_i915_private *i915,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf))
+{
+ if (unlikely(!atomic_inc_not_zero(&wf->count)))
+ return __intel_wakeref_get_first(i915, wf, fn);
+
+ return 0;
+}
+
+/**
+ * intel_wakeref_put: Release the wakeref
+ * @i915: the drm_i915_private device
+ * @wf: the wakeref
+ * @fn: callback for releasing the wakeref, called only on final release.
+ *
+ * Release our hold on the wakeref. When there are no more users,
+ * the runtime pm wakeref will be released after the @fn callback is called
+ * underneath the wakeref mutex.
+ *
+ * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
+ * is retained and an error reported.
+ *
+ * Returns: 0 if the wakeref was released successfully, or a negative error
+ * code otherwise.
+ */
+static inline int
+intel_wakeref_put(struct drm_i915_private *i915,
+ struct intel_wakeref *wf,
+ int (*fn)(struct intel_wakeref *wf))
+{
+ if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
+ return __intel_wakeref_put_last(i915, wf, fn);
+
+ return 0;
+}
+
+/**
+ * intel_wakeref_lock: Lock the wakeref (mutex)
+ * @wf: the wakeref
+ *
+ * Locks the wakeref to prevent it from being acquired or released. New
+ * users can still adjust the counter via the fast path, but the wakeref
+ * itself (and its callbacks) cannot be acquired or released.
+ */
+static inline void
+intel_wakeref_lock(struct intel_wakeref *wf)
+ __acquires(wf->mutex)
+{
+ mutex_lock(&wf->mutex);
+}
+
+/**
+ * intel_wakeref_unlock: Unlock the wakeref
+ * @wf: the wakeref
+ *
+ * Releases a previously acquired intel_wakeref_lock().
+ */
+static inline void
+intel_wakeref_unlock(struct intel_wakeref *wf)
+ __releases(wf->mutex)
+{
+ mutex_unlock(&wf->mutex);
+}
+
+/**
+ * intel_wakeref_active: Query whether the wakeref is currently held
+ * @wf: the wakeref
+ *
+ * Returns: true if the wakeref is currently held.
+ */
+static inline bool
+intel_wakeref_active(struct intel_wakeref *wf)
+{
+ return READ_ONCE(wf->wakeref);
+}
+
+#endif /* INTEL_WAKEREF_H */
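
As a usage illustration for the API above (all names hypothetical): only the 0->1 and 1->0 transitions take the mutex and run the callback; every other get and put is a single atomic operation.

    struct my_unit {
        struct intel_wakeref wakeref;
    };

    static int my_unit_wake(struct intel_wakeref *wf)   /* first get */
    {
        /* called under wf->mutex with the runtime-pm wakeref held */
        return 0;   /* failure here unwinds the acquisition */
    }

    static int my_unit_sleep(struct intel_wakeref *wf)  /* last put */
    {
        /* called under wf->mutex before the runtime-pm wakeref drops */
        return 0;   /* failure here keeps the wakeref held */
    }

    static int my_unit_get(struct drm_i915_private *i915, struct my_unit *u)
    {
        return intel_wakeref_get(i915, &u->wakeref, my_unit_wake);
    }

    static int my_unit_put(struct drm_i915_private *i915, struct my_unit *u)
    {
        return intel_wakeref_put(i915, &u->wakeref, my_unit_sleep);
    }

The wakeref must first be set up with intel_wakeref_init(&u->wakeref), which also assigns the lockdep class for the mutex.
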
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 90721b54e7ae..1e1f83326a96 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -26,6 +26,7 @@
#include <linux/prime_numbers.h>
+#include "igt_gem_utils.h"
#include "mock_drm.h"
#include "i915_random.h"
@@ -980,7 +981,7 @@ static int gpu_write(struct i915_vma *vma,
if (IS_ERR(batch))
return PTR_ERR(batch);
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 27d8f853111b..eee838dc0634 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -46,7 +46,7 @@ static int __live_active_setup(struct drm_i915_private *i915,
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 6fd70d326468..c6a9bff85311 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -8,34 +8,27 @@
#include "../i915_selftest.h"
-#include "mock_context.h"
+#include "igt_gem_utils.h"
#include "igt_flush_test.h"
+#include "mock_context.h"
static int switch_to_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
- int err = 0;
-
- wakeref = intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- break;
- }
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
i915_request_add(rq);
}
- intel_runtime_pm_put(i915, wakeref);
-
- return err;
+ return 0;
}
static void trash_stolen(struct drm_i915_private *i915)
@@ -120,7 +113,7 @@ static void pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(i915, wakeref) {
- intel_engines_sanitize(i915, false);
+ intel_gt_sanitize(i915, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index e43630b40fce..046a38743152 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
+ rq = i915_request_create(i915->engine[RCS0]->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 4e1b6efc6b22..34ac5cc6d59f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -24,10 +24,12 @@
#include <linux/prime_numbers.h>
-#include "../i915_reset.h"
-#include "../i915_selftest.h"
+#include "gt/intel_reset.h"
+#include "i915_selftest.h"
+
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_gem_utils.h"
#include "igt_live_test.h"
#include "igt_reset.h"
#include "igt_spinner.h"
@@ -90,7 +92,7 @@ static int live_nop_switch(void *arg)
times[0] = ktime_get_raw();
for (n = 0; n < nctx; n++) {
- rq = i915_request_alloc(engine, ctx[n]);
+ rq = igt_request_alloc(ctx[n], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unlock;
@@ -120,7 +122,7 @@ static int live_nop_switch(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
- rq = i915_request_alloc(engine, ctx[n % nctx]);
+ rq = igt_request_alloc(ctx[n % nctx], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unlock;
@@ -300,7 +302,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
goto err_vma;
}
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
@@ -754,8 +756,7 @@ err:
static int
emit_rpcs_query(struct drm_i915_gem_object *obj,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
struct i915_request **rq_out)
{
struct i915_request *rq;
@@ -763,9 +764,9 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int err;
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -783,13 +784,15 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
goto err_vma;
}
- rq = i915_request_alloc(engine, ctx);
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
- err = engine->emit_bb_start(rq, batch->node.start, batch->node.size, 0);
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
if (err)
goto err_request;
@@ -833,8 +836,7 @@ static int
__sseu_prepare(struct drm_i915_private *i915,
const char *name,
unsigned int flags,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
struct igt_spinner **spin)
{
struct i915_request *rq;
@@ -852,7 +854,10 @@ __sseu_prepare(struct drm_i915_private *i915,
if (ret)
goto err_free;
- rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(*spin,
+ ce->gem_context,
+ ce->engine,
+ MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto err_fini;
@@ -879,8 +884,7 @@ err_free:
static int
__read_slice_count(struct drm_i915_private *i915,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
struct drm_i915_gem_object *obj,
struct igt_spinner *spin,
u32 *rpcs)
@@ -891,7 +895,7 @@ __read_slice_count(struct drm_i915_private *i915,
u32 *buf, val;
long ret;
- ret = emit_rpcs_query(obj, ctx, engine, &rq);
+ ret = emit_rpcs_query(obj, ce, &rq);
if (ret)
return ret;
@@ -955,31 +959,29 @@ static int
__sseu_finish(struct drm_i915_private *i915,
const char *name,
unsigned int flags,
- struct i915_gem_context *ctx,
- struct i915_gem_context *kctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
struct drm_i915_gem_object *obj,
unsigned int expected,
struct igt_spinner *spin)
{
- unsigned int slices =
- hweight32(intel_device_default_sseu(i915).slice_mask);
+ unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
u32 rpcs = 0;
int ret = 0;
if (flags & TEST_RESET) {
- ret = i915_reset_engine(engine, "sseu");
+ ret = i915_reset_engine(ce->engine, "sseu");
if (ret)
goto out;
}
- ret = __read_slice_count(i915, ctx, engine, obj,
+ ret = __read_slice_count(i915, ce, obj,
flags & TEST_RESET ? NULL : spin, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
if (ret)
goto out;
- ret = __read_slice_count(i915, kctx, engine, obj, NULL, &rpcs);
+ ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
+ NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
out:
@@ -993,7 +995,7 @@ out:
if (ret)
return ret;
- ret = __read_slice_count(i915, ctx, engine, obj, NULL, &rpcs);
+ ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected,
"Context", " after idle!");
}
@@ -1005,28 +1007,22 @@ static int
__sseu_test(struct drm_i915_private *i915,
const char *name,
unsigned int flags,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
struct drm_i915_gem_object *obj,
struct intel_sseu sseu)
{
struct igt_spinner *spin = NULL;
- struct i915_gem_context *kctx;
int ret;
- kctx = kernel_context(i915);
- if (IS_ERR(kctx))
- return PTR_ERR(kctx);
-
- ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin);
+ ret = __sseu_prepare(i915, name, flags, ce, &spin);
if (ret)
- goto out_context;
+ return ret;
- ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+ ret = __intel_context_reconfigure_sseu(ce, sseu);
if (ret)
goto out_spin;
- ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj,
+ ret = __sseu_finish(i915, name, flags, ce, obj,
hweight32(sseu.slice_mask), spin);
out_spin:
@@ -1035,10 +1031,6 @@ out_spin:
igt_spinner_fini(spin);
kfree(spin);
}
-
-out_context:
- kernel_context_close(kctx);
-
return ret;
}
@@ -1047,10 +1039,11 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
const char *name,
unsigned int flags)
{
- struct intel_sseu default_sseu = intel_device_default_sseu(i915);
struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct intel_sseu default_sseu = engine->sseu;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct intel_sseu pg_sseu;
intel_wakeref_t wakeref;
struct drm_file *file;
@@ -1102,23 +1095,33 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(i915);
+ ce = i915_gem_context_get_engine(ctx, RCS0);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ goto out_rpm;
+ }
+
+ ret = intel_context_pin(ce);
+ if (ret)
+ goto out_context;
+
/* First set the default mask. */
- ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu);
+ ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
if (ret)
goto out_fail;
/* Then set a power-gated configuration. */
- ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu);
+ ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
if (ret)
goto out_fail;
/* Back to defaults. */
- ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu);
+ ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
if (ret)
goto out_fail;
/* One last power-gated configuration for the road. */
- ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu);
+ ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
if (ret)
goto out_fail;
@@ -1126,9 +1129,12 @@ out_fail:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
ret = -EIO;
- i915_gem_object_put(obj);
-
+ intel_context_unpin(ce);
+out_context:
+ intel_context_put(ce);
+out_rpm:
intel_runtime_pm_put(i915, wakeref);
+ i915_gem_object_put(obj);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
@@ -1345,7 +1351,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto err_unpin;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1440,7 +1446,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
goto err_unpin;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1608,113 +1614,6 @@ __engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
return "none";
}
-static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
- struct i915_gem_context *ctx,
- intel_engine_mask_t engines)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
- int pass;
-
- GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
- for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */
- bool from_idle = pass & 1;
- int err;
-
- if (!from_idle) {
- for_each_engine_masked(engine, i915, engines, tmp) {
- struct i915_request *rq;
-
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- i915_request_add(rq);
- }
- }
-
- err = i915_gem_switch_to_kernel_context(i915,
- i915->gt.active_engines);
- if (err)
- return err;
-
- if (!from_idle) {
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- return err;
- }
-
- if (i915->gt.active_requests) {
- pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n",
- i915->gt.active_requests,
- pass, from_idle ? "idle" : "busy",
- __engine_name(i915, engines),
- is_power_of_2(engines) ? "" : "s");
- return -EINVAL;
- }
-
- /* XXX Bonus points for proving we are the kernel context! */
-
- mutex_unlock(&i915->drm.struct_mutex);
- drain_delayed_work(&i915->gt.idle_work);
- mutex_lock(&i915->drm.struct_mutex);
- }
-
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- return -EIO;
-
- return 0;
-}
-
-static int igt_switch_to_kernel_context(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- int err;
-
- /*
- * A core premise of switching to the kernel context is that
- * if an engine is already idling in the kernel context, we
- * do not emit another request and wake it up. The other being
- * that we do indeed end up idling in the kernel context.
- */
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
-
- ctx = kernel_context(i915);
- if (IS_ERR(ctx)) {
- mutex_unlock(&i915->drm.struct_mutex);
- return PTR_ERR(ctx);
- }
-
- /* First check idling each individual engine */
- for_each_engine(engine, i915, id) {
- err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
- if (err)
- goto out_unlock;
- }
-
- /* Now en masse */
- err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
- if (err)
- goto out_unlock;
-
-out_unlock:
- GEM_TRACE_DUMP_ON(err);
-
- intel_runtime_pm_put(i915, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
- kernel_context_close(ctx);
- return err;
-}
-
static void mock_barrier_task(void *data)
{
unsigned int *counter = data;
@@ -1729,7 +1628,6 @@ static int mock_context_barrier(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx;
struct i915_request *rq;
- intel_wakeref_t wakeref;
unsigned int counter;
int err;
@@ -1767,20 +1665,17 @@ static int mock_context_barrier(void *arg)
goto out;
}
if (counter == 0) {
- pr_err("Did not retire immediately for all inactive engines\n");
+ pr_err("Did not retire immediately for all unused engines\n");
err = -EINVAL;
goto out;
}
- rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(i915, wakeref)
- rq = i915_request_alloc(i915->engine[RCS0], ctx);
+ rq = igt_request_alloc(ctx, i915->engine[RCS0]);
if (IS_ERR(rq)) {
pr_err("Request allocation failed!\n");
goto out;
}
i915_request_add(rq);
- GEM_BUG_ON(list_empty(&ctx->active_engines));
counter = 0;
context_barrier_inject_fault = BIT(RCS0);
@@ -1824,7 +1719,6 @@ unlock:
int i915_gem_context_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
- SUBTEST(igt_switch_to_kernel_context),
SUBTEST(mock_context_barrier),
};
struct drm_i915_private *i915;
@@ -1843,7 +1737,6 @@ int i915_gem_context_mock_selftests(void)
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
static const struct i915_subtest tests[] = {
- SUBTEST(igt_switch_to_kernel_context),
SUBTEST(live_nop_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 89766688e420..4fc6e5445dd1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -24,6 +24,7 @@
#include "../i915_selftest.h"
+#include "igt_gem_utils.h"
#include "lib_sw_fence.h"
#include "mock_context.h"
#include "mock_drm.h"
@@ -460,7 +461,7 @@ static int igt_evict_contexts(void *arg)
/* We will need some GGTT space for the rq's context */
igt_evict_ctl.fail_if_busy = true;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
igt_evict_ctl.fail_if_busy = false;
if (IS_ERR(rq)) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 971148fbe6f5..b926d1cd165d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -24,6 +24,7 @@
#include "../i915_selftest.h"
+#include "igt_flush_test.h"
#include "mock_gem_device.h"
#include "huge_gem_object.h"
@@ -468,7 +469,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
+ rq = i915_request_create(i915->engine[RCS0]->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -505,17 +506,21 @@ static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_shrinker_unregister(i915);
- mutex_lock(&i915->drm.struct_mutex);
- if (!i915->gt.active_requests++) {
- intel_wakeref_t wakeref;
+ intel_gt_pm_get(i915);
- with_intel_runtime_pm(i915, wakeref)
- i915_gem_unpark(i915);
- }
+ cancel_delayed_work_sync(&i915->gem.retire_work);
+ flush_work(&i915->gem.idle_work);
+}
+
+static void restore_retire_worker(struct drm_i915_private *i915)
+{
+ intel_gt_pm_put(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- cancel_delayed_work_sync(&i915->gt.retire_work);
- cancel_delayed_work_sync(&i915->gt.idle_work);
+ i915_gem_shrinker_register(i915);
}
static int igt_mmap_offset_exhaustion(void *arg)
@@ -615,13 +620,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
out:
drm_mm_remove_node(&resv);
out_park:
- mutex_lock(&i915->drm.struct_mutex);
- if (--i915->gt.active_requests)
- queue_delayed_work(i915->wq, &i915->gt.retire_work, 0);
- else
- queue_delayed_work(i915->wq, &i915->gt.idle_work, 0);
- mutex_unlock(&i915->drm.struct_mutex);
- i915_gem_shrinker_register(i915);
+ restore_retire_worker(i915);
return err;
err_obj:
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 6d766925ad04..a953125b14c4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -17,12 +17,14 @@ selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
+selftest(vma, i915_vma_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
selftest(gtt, i915_gem_gtt_live_selftests)
selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
+selftest(reset, intel_reset_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
selftest(guc, intel_guc_live_selftest)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index e6ffe2240126..b60591531e4a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -267,7 +267,7 @@ static struct i915_request *
__live_request_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- return i915_request_alloc(engine, ctx);
+ return igt_request_alloc(ctx, engine);
}
static int __igt_breadcrumbs_smoketest(void *arg)
@@ -551,8 +551,7 @@ static int live_nop_request(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
- request = i915_request_alloc(engine,
- i915->kernel_context);
+ request = i915_request_create(engine->kernel_context);
if (IS_ERR(request)) {
err = PTR_ERR(request);
goto out_unlock;
@@ -649,7 +648,7 @@ empty_request(struct intel_engine_cs *engine,
struct i915_request *request;
int err;
- request = i915_request_alloc(engine, engine->i915->kernel_context);
+ request = i915_request_create(engine->kernel_context);
if (IS_ERR(request))
return request;
@@ -853,7 +852,7 @@ static int live_all_engines(void *arg)
}
for_each_engine(engine, i915, id) {
- request[id] = i915_request_alloc(engine, i915->kernel_context);
+ request[id] = i915_request_create(engine->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed with err=%d\n",
@@ -962,7 +961,7 @@ static int live_sequential_engines(void *arg)
goto out_unlock;
}
- request[id] = i915_request_alloc(engine, i915->kernel_context);
+ request[id] = i915_request_create(engine->kernel_context);
if (IS_ERR(request[id])) {
err = PTR_ERR(request[id]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
@@ -1075,7 +1074,7 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (HAS_EXECLISTS(ctx->i915))
return INT_MAX;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
} else {
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index bd96afcadfe7..ff9ebe50fae8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -454,7 +454,7 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
goto out;
}
- rq = i915_request_alloc(engine, engine->i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq))
goto out_unpin;
@@ -678,7 +678,7 @@ static int live_hwsp_wrap(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fc594b030f5a..0027c1fac336 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -59,7 +59,7 @@ static bool assert_vma(struct i915_vma *vma,
static struct i915_vma *
checked_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- struct i915_ggtt_view *view)
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
bool ok = true;
@@ -397,18 +397,79 @@ assert_rotated(struct drm_i915_gem_object *obj,
return sg;
}
-static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
- const struct intel_rotation_plane_info *b)
+static unsigned long remapped_index(const struct intel_remapped_info *r,
+ unsigned int n,
+ unsigned int x,
+ unsigned int y)
+{
+ return (r->plane[n].stride * y +
+ r->plane[n].offset + x);
+}
+
+static struct scatterlist *
+assert_remapped(struct drm_i915_gem_object *obj,
+ const struct intel_remapped_info *r, unsigned int n,
+ struct scatterlist *sg)
+{
+ unsigned int x, y;
+ unsigned int left = 0;
+ unsigned int offset;
+
+ for (y = 0; y < r->plane[n].height; y++) {
+ for (x = 0; x < r->plane[n].width; x++) {
+ unsigned long src_idx;
+ dma_addr_t src;
+
+ if (!sg) {
+ pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+ n, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+ if (!left) {
+ offset = 0;
+ left = sg_dma_len(sg);
+ }
+
+ src_idx = remapped_index(r, n, x, y);
+ src = i915_gem_object_get_dma_address(obj, src_idx);
+
+ if (left < PAGE_SIZE || left & (PAGE_SIZE - 1)) {
+ pr_err("Invalid sg.length, found %d, expected %lu for remapped page (%d, %d) [src index %lu]\n",
+ sg_dma_len(sg), PAGE_SIZE,
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_address(sg) + offset != src) {
+ pr_err("Invalid address for remapped page (%d, %d) [src index %lu]\n",
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ left -= PAGE_SIZE;
+ offset += PAGE_SIZE;
+
+ if (!left)
+ sg = sg_next(sg);
+ }
+ }
+
+ return sg;
+}
+
+static unsigned int rotated_size(const struct intel_remapped_plane_info *a,
+ const struct intel_remapped_plane_info *b)
{
return a->width * a->height + b->width * b->height;
}
-static int igt_vma_rotate(void *arg)
+static int igt_vma_rotate_remap(void *arg)
{
struct i915_ggtt *ggtt = arg;
struct i915_address_space *vm = &ggtt->vm;
struct drm_i915_gem_object *obj;
- const struct intel_rotation_plane_info planes[] = {
+ const struct intel_remapped_plane_info planes[] = {
{ .width = 1, .height = 1, .stride = 1 },
{ .width = 2, .height = 2, .stride = 2 },
{ .width = 4, .height = 4, .stride = 4 },
@@ -426,6 +487,11 @@ static int igt_vma_rotate(void *arg)
{ .width = 6, .height = 4, .stride = 6 },
{ }
}, *a, *b;
+ enum i915_ggtt_view_type types[] = {
+ I915_GGTT_VIEW_ROTATED,
+ I915_GGTT_VIEW_REMAPPED,
+ 0,
+ }, *t;
const unsigned int max_pages = 64;
int err = -ENOMEM;
@@ -437,6 +503,7 @@ static int igt_vma_rotate(void *arg)
if (IS_ERR(obj))
goto out;
+ for (t = types; *t; t++) {
for (a = planes; a->width; a++) {
for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
struct i915_ggtt_view view;
@@ -447,7 +514,7 @@ static int igt_vma_rotate(void *arg)
GEM_BUG_ON(max_offset > max_pages);
max_offset = max_pages - max_offset;
- view.type = I915_GGTT_VIEW_ROTATED;
+ view.type = *t;
view.rotated.plane[0] = *a;
view.rotated.plane[1] = *b;
@@ -468,14 +535,23 @@ static int igt_vma_rotate(void *arg)
goto out_object;
}
- if (vma->size != rotated_size(a, b) * PAGE_SIZE) {
+ if (view.type == I915_GGTT_VIEW_ROTATED &&
+ vma->size != rotated_size(a, b) * PAGE_SIZE) {
pr_err("VMA is wrong size, expected %lu, found %llu\n",
PAGE_SIZE * rotated_size(a, b), vma->size);
err = -EINVAL;
goto out_object;
}
- if (vma->pages->nents != rotated_size(a, b)) {
+ if (view.type == I915_GGTT_VIEW_REMAPPED &&
+ vma->size > rotated_size(a, b) * PAGE_SIZE) {
+ pr_err("VMA is wrong size, expected %lu, found %llu\n",
+ PAGE_SIZE * rotated_size(a, b), vma->size);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (vma->pages->nents > rotated_size(a, b)) {
pr_err("sg table is wrong sizeo, expected %u, found %u nents\n",
rotated_size(a, b), vma->pages->nents);
err = -EINVAL;
@@ -497,9 +573,14 @@ static int igt_vma_rotate(void *arg)
sg = vma->pages->sgl;
for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
- sg = assert_rotated(obj, &view.rotated, n, sg);
+ if (view.type == I915_GGTT_VIEW_ROTATED)
+ sg = assert_rotated(obj, &view.rotated, n, sg);
+ else
+ sg = assert_remapped(obj, &view.remapped, n, sg);
if (IS_ERR(sg)) {
- pr_err("Inconsistent VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", n,
+ pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n",
+ view.type == I915_GGTT_VIEW_ROTATED ?
+ "rotated" : "remapped", n,
view.rotated.plane[0].width,
view.rotated.plane[0].height,
view.rotated.plane[0].stride,
@@ -518,6 +599,7 @@ static int igt_vma_rotate(void *arg)
}
}
}
+ }
out_object:
i915_gem_object_put(obj);
@@ -721,7 +803,7 @@ int i915_vma_mock_selftests(void)
static const struct i915_subtest tests[] = {
SUBTEST(igt_vma_create),
SUBTEST(igt_vma_pin1),
- SUBTEST(igt_vma_rotate),
+ SUBTEST(igt_vma_rotate_remap),
SUBTEST(igt_vma_partial),
};
struct drm_i915_private *i915;
@@ -752,3 +834,145 @@ out_put:
drm_dev_put(&i915->drm);
return err;
}
+
+static int igt_vma_remapped_gtt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const struct intel_remapped_plane_info planes[] = {
+ { .width = 1, .height = 1, .stride = 1 },
+ { .width = 2, .height = 2, .stride = 2 },
+ { .width = 4, .height = 4, .stride = 4 },
+ { .width = 8, .height = 8, .stride = 8 },
+
+ { .width = 3, .height = 5, .stride = 3 },
+ { .width = 3, .height = 5, .stride = 4 },
+ { .width = 3, .height = 5, .stride = 5 },
+
+ { .width = 5, .height = 3, .stride = 5 },
+ { .width = 5, .height = 3, .stride = 7 },
+ { .width = 5, .height = 3, .stride = 9 },
+
+ { .width = 4, .height = 6, .stride = 6 },
+ { .width = 6, .height = 4, .stride = 6 },
+ { }
+ }, *p;
+ enum i915_ggtt_view_type types[] = {
+ I915_GGTT_VIEW_ROTATED,
+ I915_GGTT_VIEW_REMAPPED,
+ 0,
+ }, *t;
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ obj = i915_gem_object_create_internal(i915, 10 * 10 * PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ wakeref = intel_runtime_pm_get(i915);
+
+ for (t = types; *t; t++) {
+ for (p = planes; p->width; p++) {
+ struct i915_ggtt_view view = {
+ .type = *t,
+ .rotated.plane[0] = *p,
+ };
+ struct i915_vma *vma;
+ u32 __iomem *map;
+ unsigned int x, y;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ goto out;
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ GEM_BUG_ON(vma->ggtt_view.type != *t);
+
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ for (y = 0; y < p->height; y++) {
+ for (x = 0; x < p->width; x++) {
+ unsigned int offset;
+ u32 val = y << 16 | x;
+
+ if (*t == I915_GGTT_VIEW_ROTATED)
+ offset = (x * p->height + y) * PAGE_SIZE;
+ else
+ offset = (y * p->width + x) * PAGE_SIZE;
+
+ iowrite32(val, &map[offset / sizeof(*map)]);
+ }
+ }
+
+ i915_vma_unpin_iomap(vma);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ GEM_BUG_ON(vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL);
+
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out;
+ }
+
+ for (y = 0; y < p->height; y++) {
+ for (x = 0; x < p->width; x++) {
+ unsigned int offset, src_idx;
+ u32 exp = y << 16 | x;
+ u32 val;
+
+ if (*t == I915_GGTT_VIEW_ROTATED)
+ src_idx = rotated_index(&view.rotated, 0, x, y);
+ else
+ src_idx = remapped_index(&view.remapped, 0, x, y);
+ offset = src_idx * PAGE_SIZE;
+
+ val = ioread32(&map[offset / sizeof(*map)]);
+ if (val != exp) {
+ pr_err("%s VMA write test failed, expected 0x%x, found 0x%x\n",
+ *t == I915_GGTT_VIEW_ROTATED ? "Rotated" : "Remapped",
+ exp, val);
+ i915_vma_unpin_iomap(vma);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+ i915_vma_unpin_iomap(vma);
+ }
+ }
+
+out:
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int i915_vma_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_vma_remapped_gtt),
+ };
+
+ return i915_subtests(tests, i915);
+}
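
For reference, the two transforms exercised above differ only in walk order. With a single plane of width 3, height 5 and stride 4, remapped_index() is a plain row-major walk over the stride, while the rotated layout (see the offset computation in the GTT test) walks columns. A worked example, reusing the plane struct from the tables above:

    static const struct intel_remapped_plane_info p = {
        .width = 3, .height = 5, .stride = 4,
    };

    /*
     * remapped: page = stride * y + x  ->  (x=2, y=3) gives 4*3 + 2 = 14
     * rotated:  page = height * x + y  ->  (x=2, y=3) gives 5*2 + 3 = 13
     *
     * Both views address the same 3x5 plane in different page orders,
     * which is exactly what assert_remapped()/assert_rotated() verify
     * against the sg table.
     */
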
diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.h b/drivers/gpu/drm/i915/selftests/igt_atomic.h
new file mode 100644
index 000000000000..93ec89f487ec
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_atomic.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_ATOMIC_H
+#define IGT_ATOMIC_H
+
+#include <linux/preempt.h>
+#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
+
+static void __preempt_begin(void)
+{
+ preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+ preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+ local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+ local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+ local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+ local_irq_enable();
+}
+
+struct igt_atomic_section {
+ const char *name;
+ void (*critical_section_begin)(void);
+ void (*critical_section_end)(void);
+};
+
+static const struct igt_atomic_section igt_atomic_phases[] = {
+ { "preempt", __preempt_begin, __preempt_end },
+ { "softirq", __softirq_begin, __softirq_end },
+ { "hardirq", __hardirq_begin, __hardirq_end },
+ { }
+};
+
+#endif /* IGT_ATOMIC_H */
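
A minimal consumer of the phase table above, with do_atomic_check() standing in as a hypothetical name for whatever operation must be proven safe in each context:

    static int igt_run_atomic_phases(void)
    {
        const struct igt_atomic_section *p;

        for (p = igt_atomic_phases; p->name; p++) {
            p->critical_section_begin();
            do_atomic_check(p->name);   /* must not sleep in here */
            p->critical_section_end();
        }

        return 0;
    }
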
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 94aee4071a66..e42f3c58536a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -11,23 +11,29 @@
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
{
+ int ret = i915_terminally_wedged(i915) ? -EIO : 0;
+ int repeat = !!(flags & I915_WAIT_LOCKED);
+
cond_resched();
- if (flags & I915_WAIT_LOCKED &&
- i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) {
- pr_err("Failed to switch back to kernel context; declaring wedged\n");
- i915_gem_set_wedged(i915);
- }
+ do {
+ if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
- if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
- pr_err("%pS timed out, cancelling all further testing.\n",
- __builtin_return_address(0));
+ GEM_TRACE("%pS timed out.\n",
+ __builtin_return_address(0));
+ GEM_TRACE_DUMP();
- GEM_TRACE("%pS timed out.\n", __builtin_return_address(0));
- GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ repeat = 0;
+ ret = -EIO;
+ }
- i915_gem_set_wedged(i915);
- }
+ /* Ensure we also flush after wedging. */
+ if (flags & I915_WAIT_LOCKED)
+ i915_retire_requests(i915);
+ } while (repeat--);
- return i915_terminally_wedged(i915);
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/selftests/igt_gem_utils.c
new file mode 100644
index 000000000000..16891b1a3e50
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_gem_utils.c
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_gem_utils.h"
+
+#include "gt/intel_context.h"
+
+#include "../i915_gem_context.h"
+#include "../i915_gem_pm.h"
+#include "../i915_request.h"
+
+struct i915_request *
+igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ /*
+ * Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
+ */
+ ce = i915_gem_context_get_engine(ctx, engine->id);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+
+ return rq;
+}
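
A typical caller, matching the conversions throughout the selftests in this series: build a request on a context/engine pair and submit it.

    static int submit_nop(struct i915_gem_context *ctx,
                          struct intel_engine_cs *engine)
    {
        struct i915_request *rq;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq))
            return PTR_ERR(rq);

        i915_request_add(rq);   /* no payload; an empty request suffices */
        return 0;
    }
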
diff --git a/drivers/gpu/drm/i915/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/selftests/igt_gem_utils.h
new file mode 100644
index 000000000000..0f17251cf75d
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_gem_utils.h
@@ -0,0 +1,17 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __IGT_GEM_UTILS_H__
+#define __IGT_GEM_UTILS_H__
+
+struct i915_request;
+struct i915_gem_context;
+struct intel_engine_cs;
+
+struct i915_request *
+igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+
+#endif /* __IGT_GEM_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 208a966da8ca..587df6fd4ffe 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -6,8 +6,9 @@
#include "igt_reset.h"
+#include "gt/intel_engine.h"
+
#include "../i915_drv.h"
-#include "../intel_ringbuffer.h"
void igt_global_reset_lock(struct drm_i915_private *i915)
{
@@ -42,3 +43,11 @@ void igt_global_reset_unlock(struct drm_i915_private *i915)
clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
}
+
+bool igt_force_reset(struct drm_i915_private *i915)
+{
+ i915_gem_set_wedged(i915);
+ i915_reset(i915, 0, NULL);
+
+ return !i915_reset_failed(i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
index 5f0234d045d5..363bd853e50f 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.h
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -11,5 +11,6 @@
void igt_global_reset_lock(struct drm_i915_private *i915);
void igt_global_reset_unlock(struct drm_i915_private *i915);
+bool igt_force_reset(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 16890dfe74c0..ece8a8a0d3b0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -4,6 +4,7 @@
* Copyright © 2018 Intel Corporation
*/
+#include "igt_gem_utils.h"
#include "igt_spinner.h"
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
@@ -114,7 +115,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
if (err)
goto unpin_vma;
- rq = i915_request_alloc(engine, ctx);
+ rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin_hws;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index 391777c76dc7..d312e7cdab68 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -9,9 +9,10 @@
#include "../i915_selftest.h"
+#include "gt/intel_engine.h"
+
#include "../i915_drv.h"
#include "../i915_request.h"
-#include "../intel_ringbuffer.h"
#include "../i915_gem_context.h"
struct igt_spinner {
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index 2bfa72c1654b..b976c12817c5 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -45,6 +45,9 @@ void __onstack_fence_init(struct i915_sw_fence *fence,
void onstack_fence_fini(struct i915_sw_fence *fence)
{
+ if (!fence->flags)
+ return;
+
i915_sw_fence_commit(fence);
i915_sw_fence_fini(fence);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 0426093bf1d9..10e67c931ed1 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915,
const char *name)
{
struct i915_gem_context *ctx;
+ struct i915_gem_engines *e;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -40,18 +41,20 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
- ctx->hw_contexts = RB_ROOT;
- spin_lock_init(&ctx->hw_contexts_lock);
+ mutex_init(&ctx->engines_mutex);
+ e = default_engines(ctx);
+ if (IS_ERR(e))
+ goto err_free;
+ RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
- INIT_LIST_HEAD(&ctx->active_engines);
mutex_init(&ctx->mutex);
ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
- goto err_handles;
+ goto err_engines;
if (name) {
struct i915_hw_ppgtt *ppgtt;
@@ -69,7 +72,9 @@ mock_context(struct drm_i915_private *i915,
return ctx;
-err_handles:
+err_engines:
+ free_engines(rcu_access_pointer(ctx->engines));
+err_free:
kfree(ctx);
return NULL;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 60bbf8b4df40..9fd02025d382 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,7 +25,8 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
-#include "mock_engine.h"
+#include "gt/mock_engine.h"
+
#include "mock_context.h"
#include "mock_request.h"
#include "mock_gem_device.h"
@@ -40,11 +41,10 @@ void mock_device_flush(struct drm_i915_private *i915)
lockdep_assert_held(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id)
- mock_engine_flush(engine);
-
- i915_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
+ do {
+ for_each_engine(engine, i915, id)
+ mock_engine_flush(engine);
+ } while (i915_retire_requests(i915));
}
static void mock_device_release(struct drm_device *dev)
@@ -58,8 +58,7 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
- drain_delayed_work(&i915->gt.retire_work);
- drain_delayed_work(&i915->gt.idle_work);
+ flush_work(&i915->gem.idle_work);
i915_gem_drain_workqueue(i915);
mutex_lock(&i915->drm.struct_mutex);
@@ -109,10 +108,6 @@ static void mock_retire_work_handler(struct work_struct *work)
static void mock_idle_work_handler(struct work_struct *work)
{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gt.idle_work.work);
-
- i915->gt.active_engines = 0;
}
static int pm_domain_resume(struct device *dev)
@@ -184,6 +179,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_uncore_init(&i915->uncore);
i915_gem_init__mm(i915);
+ intel_gt_pm_init(i915);
+ atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
@@ -196,8 +193,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
- INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
- INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
+ INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
+ INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler);
i915->gt.awake = true;
@@ -211,12 +208,16 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915, &i915->ggtt);
mkwrite_device_info(i915)->engine_mask = BIT(0);
- i915->kernel_context = mock_context(i915, NULL);
- if (!i915->kernel_context)
- goto err_unlock;
i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
if (!i915->engine[RCS0])
+ goto err_unlock;
+
+ i915->kernel_context = mock_context(i915, NULL);
+ if (!i915->kernel_context)
+ goto err_engine;
+
+ if (mock_engine_init(i915->engine[RCS0]))
goto err_context;
mutex_unlock(&i915->drm.struct_mutex);
@@ -227,6 +228,8 @@ struct drm_i915_private *mock_gem_device(void)
err_context:
i915_gem_contexts_fini(i915);
+err_engine:
+ mock_engine_free(i915->engine[RCS0]);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
i915_timelines_fini(i915);
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index d1a7c9608712..b99f7576153c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -22,7 +22,9 @@
*
*/
-#include "mock_engine.h"
+#include "gt/mock_engine.h"
+
+#include "igt_gem_utils.h"
#include "mock_request.h"
struct i915_request *
@@ -33,7 +35,7 @@ mock_request(struct intel_engine_cs *engine,
struct i915_request *request;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
- request = i915_request_alloc(engine, context);
+ request = igt_request_alloc(context, engine);
if (IS_ERR(request))
return NULL;
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index e0b1ec821960..895ea1a72a69 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -30,13 +30,15 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_fifo_underrun.h"
#include "intel_panel.h"
+#include "intel_sideband.h"
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
@@ -248,7 +250,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
static void band_gap_reset(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->sb_lock);
+ vlv_flisdsi_get(dev_priv);
vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
@@ -257,29 +259,7 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 tmp;
-
- tmp = I915_READ(PIPEMISC(crtc->pipe));
-
- switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
- case PIPEMISC_DITHER_6_BPC:
- return 18;
- case PIPEMISC_DITHER_8_BPC:
- return 24;
- case PIPEMISC_DITHER_10_BPC:
- return 30;
- case PIPEMISC_DITHER_12_BPC:
- return 36;
- default:
- MISSING_CASE(tmp);
- return 0;
- }
+ vlv_flisdsi_put(dev_priv);
}
static int intel_dsi_compute_config(struct intel_encoder *encoder,
@@ -515,11 +495,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->sb_lock);
+ vlv_flisdsi_get(dev_priv);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
* needed every time after power gate */
vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_flisdsi_put(dev_priv);
/* bandgap reset is needed after every power gate */
band_gap_reset(dev_priv);
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index 5e7b1fb2db5d..99cc3e2e9c2c 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -26,9 +26,11 @@
*/
#include <linux/kernel.h>
-#include "intel_drv.h"
+
#include "i915_drv.h"
+#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_sideband.h"
static const u16 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
@@ -149,7 +151,7 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->sb_lock);
+ vlv_cck_get(dev_priv);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
@@ -166,11 +168,11 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_cck_put(dev_priv);
DRM_ERROR("DSI PLL lock failed\n");
return;
}
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_cck_put(dev_priv);
DRM_DEBUG_KMS("DSI PLL locked\n");
}
@@ -182,14 +184,14 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->sb_lock);
+ vlv_cck_get(dev_priv);
tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
tmp &= ~DSI_PLL_VCO_EN;
tmp |= DSI_PLL_LDO_GATE;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_cck_put(dev_priv);
}
bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
@@ -266,10 +268,10 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->sb_lock);
+ vlv_cck_get(dev_priv);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_cck_put(dev_priv);
config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
config->dsi_pll.div = pll_div;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 91edfe2498a6..2c19054ed570 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -115,8 +115,8 @@ drm_plane_state_to_ubo(struct drm_plane_state *state)
cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
BUG_ON(!cma_obj);
- x /= drm_format_horz_chroma_subsampling(fb->format->format);
- y /= drm_format_vert_chroma_subsampling(fb->format->format);
+ x /= fb->format->hsub;
+ y /= fb->format->vsub;
return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
fb->format->cpp[1] * x - eba;
@@ -134,8 +134,8 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
BUG_ON(!cma_obj);
- x /= drm_format_horz_chroma_subsampling(fb->format->format);
- y /= drm_format_vert_chroma_subsampling(fb->format->format);
+ x /= fb->format->hsub;
+ y /= fb->format->vsub;
return cma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
fb->format->cpp[2] * x - eba;
@@ -352,7 +352,6 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *old_fb = old_state->fb;
unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba;
bool can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
- int hsub, vsub;
int ret;
/* Ok to disable */
@@ -471,10 +470,8 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
* The x/y offsets must be even in case of horizontal/vertical
* chroma subsampling.
*/
- hsub = drm_format_horz_chroma_subsampling(fb->format->format);
- vsub = drm_format_vert_chroma_subsampling(fb->format->format);
- if (((state->src.x1 >> 16) & (hsub - 1)) ||
- ((state->src.y1 >> 16) & (vsub - 1)))
+ if (((state->src.x1 >> 16) & (fb->format->hsub - 1)) ||
+ ((state->src.y1 >> 16) & (fb->format->vsub - 1)))
return -EINVAL;
break;
case DRM_FORMAT_RGB565_A8:
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index f9a281a62083..b29c26cd13b2 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -17,7 +17,7 @@
int lima_sched_timeout_ms;
-MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (0 = no timeout (default))");
+MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index d29721e177bf..8fef224b93c8 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -64,7 +64,13 @@ static irqreturn_t lima_pp_bcast_irq_handler(int irq, void *data)
struct lima_ip *pp_bcast = data;
struct lima_device *dev = pp_bcast->dev;
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
- struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;
+ struct drm_lima_m450_pp_frame *frame;
+
+ /* for the shared IRQ case: no task in flight means it is not ours */
+ if (!pipe->current_task)
+ return IRQ_NONE;
+
+ frame = pipe->current_task->frame;
for (i = 0; i < frame->num_pp; i++) {
struct lima_ip *ip = pipe->processor[i];
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index d53bd45f8d96..4127cacac454 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -258,7 +258,7 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
struct lima_sched_task *task)
{
- drm_sched_stop(&pipe->base);
+ drm_sched_stop(&pipe->base, &task->base);
if (task)
drm_sched_increase_karma(&task->base);
@@ -329,19 +329,16 @@ static void lima_sched_error_work(struct work_struct *work)
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
- long timeout;
-
- if (lima_sched_timeout_ms <= 0)
- timeout = MAX_SCHEDULE_TIMEOUT;
- else
- timeout = msecs_to_jiffies(lima_sched_timeout_ms);
+ unsigned int timeout = lima_sched_timeout_ms > 0 ?
+ lima_sched_timeout_ms : 500;
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
INIT_WORK(&pipe->error_work, lima_sched_error_work);
- return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0, timeout, name);
+ return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0,
+ msecs_to_jiffies(timeout), name);
}
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig
new file mode 100644
index 000000000000..b3990126562c
--- /dev/null
+++ b/drivers/gpu/drm/mcde/Kconfig
@@ -0,0 +1,18 @@
+config DRM_MCDE
+ tristate "DRM Support for ST-Ericsson MCDE (Multichannel Display Engine)"
+ depends on DRM
+ depends on CMA
+ depends on ARM || COMPILE_TEST
+ depends on OF
+ select MFD_SYSCON
+ select DRM_MIPI_DSI
+ select DRM_BRIDGE
+ select DRM_PANEL_BRIDGE
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ help
+ Choose this option for DRM support for the ST-Ericsson MCDE
+ Multi-Channel Display Engine.
+ If M is selected, the module will be called mcde_drm.
diff --git a/drivers/gpu/drm/mcde/Makefile b/drivers/gpu/drm/mcde/Makefile
new file mode 100644
index 000000000000..fe28f4e0fe46
--- /dev/null
+++ b/drivers/gpu/drm/mcde/Makefile
@@ -0,0 +1,3 @@
+mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_display.o
+
+obj-$(CONFIG_DRM_MCDE) += mcde_drm.o
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
new file mode 100644
index 000000000000..751454ae3cd1
--- /dev/null
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -0,0 +1,1142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Parts of this file were based on the MCDE driver by Marcus Lorentzon
+ * (C) ST-Ericsson SA 2013
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <video/mipi_display.h>
+
+#include "mcde_drm.h"
+#include "mcde_display_regs.h"
+
+enum mcde_fifo {
+ MCDE_FIFO_A,
+ MCDE_FIFO_B,
+ /* TODO: implement FIFO C0 and FIFO C1 */
+};
+
+enum mcde_channel {
+ MCDE_CHANNEL_0 = 0,
+ MCDE_CHANNEL_1,
+ MCDE_CHANNEL_2,
+ MCDE_CHANNEL_3,
+};
+
+enum mcde_extsrc {
+ MCDE_EXTSRC_0 = 0,
+ MCDE_EXTSRC_1,
+ MCDE_EXTSRC_2,
+ MCDE_EXTSRC_3,
+ MCDE_EXTSRC_4,
+ MCDE_EXTSRC_5,
+ MCDE_EXTSRC_6,
+ MCDE_EXTSRC_7,
+ MCDE_EXTSRC_8,
+ MCDE_EXTSRC_9,
+};
+
+enum mcde_overlay {
+ MCDE_OVERLAY_0 = 0,
+ MCDE_OVERLAY_1,
+ MCDE_OVERLAY_2,
+ MCDE_OVERLAY_3,
+ MCDE_OVERLAY_4,
+ MCDE_OVERLAY_5,
+};
+
+enum mcde_dsi_formatter {
+ MCDE_DSI_FORMATTER_0 = 0,
+ MCDE_DSI_FORMATTER_1,
+ MCDE_DSI_FORMATTER_2,
+};
+
+void mcde_display_irq(struct mcde *mcde)
+{
+ u32 mispp, misovl, mischnl;
+ bool vblank = false;
+
+ /* Handle display IRQs */
+ mispp = readl(mcde->regs + MCDE_MISPP);
+ misovl = readl(mcde->regs + MCDE_MISOVL);
+ mischnl = readl(mcde->regs + MCDE_MISCHNL);
+
+ /*
+ * Handle IRQs from the DSI link. All IRQs from the DSI links
+ * are just latched onto the MCDE IRQ line, so we need to traverse
+ * any active DSI masters and check if an IRQ is originating from
+ * them.
+ *
+ * TODO: Currently only one DSI link is supported.
+ */
+ if (mcde_dsi_irq(mcde->mdsi)) {
+ u32 val;
+
+ /*
+ * In oneshot mode we do not send continuous updates
+ * to the display; instead we only push out updates when
+ * the update function is called, then disable the
+ * flow on the channel once we get the TE IRQ.
+ */
+ if (mcde->oneshot_mode) {
+ spin_lock(&mcde->flow_lock);
+ if (--mcde->flow_active == 0) {
+ dev_dbg(mcde->dev, "TE0 IRQ\n");
+ /* Disable FIFO A flow */
+ val = readl(mcde->regs + MCDE_CRA0);
+ val &= ~MCDE_CRX0_FLOEN;
+ writel(val, mcde->regs + MCDE_CRA0);
+ }
+ spin_unlock(&mcde->flow_lock);
+ }
+ }
+
+ /* Vblank from one of the channels */
+ if (mispp & MCDE_PP_VCMPA) {
+ dev_dbg(mcde->dev, "chnl A vblank IRQ\n");
+ vblank = true;
+ }
+ if (mispp & MCDE_PP_VCMPB) {
+ dev_dbg(mcde->dev, "chnl B vblank IRQ\n");
+ vblank = true;
+ }
+ if (mispp & MCDE_PP_VCMPC0)
+ dev_dbg(mcde->dev, "chnl C0 vblank IRQ\n");
+ if (mispp & MCDE_PP_VCMPC1)
+ dev_dbg(mcde->dev, "chnl C1 vblank IRQ\n");
+ if (mispp & MCDE_PP_VSCC0)
+ dev_dbg(mcde->dev, "chnl C0 TE IRQ\n");
+ if (mispp & MCDE_PP_VSCC1)
+ dev_dbg(mcde->dev, "chnl C1 TE IRQ\n");
+ writel(mispp, mcde->regs + MCDE_RISPP);
+
+ if (vblank)
+ drm_crtc_handle_vblank(&mcde->pipe.crtc);
+
+ if (misovl)
+ dev_info(mcde->dev, "some stray overlay IRQ %08x\n", misovl);
+ writel(misovl, mcde->regs + MCDE_RISOVL);
+
+ if (mischnl)
+ dev_info(mcde->dev, "some stray channel error IRQ %08x\n",
+ mischnl);
+ writel(mischnl, mcde->regs + MCDE_RISCHNL);
+}
+
+void mcde_display_disable_irqs(struct mcde *mcde)
+{
+ /* Disable all IRQs */
+ writel(0, mcde->regs + MCDE_IMSCPP);
+ writel(0, mcde->regs + MCDE_IMSCOVL);
+ writel(0, mcde->regs + MCDE_IMSCCHNL);
+
+ /* Clear any pending IRQs */
+ writel(0xFFFFFFFF, mcde->regs + MCDE_RISPP);
+ writel(0xFFFFFFFF, mcde->regs + MCDE_RISOVL);
+ writel(0xFFFFFFFF, mcde->regs + MCDE_RISCHNL);
+}
+
+static int mcde_display_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *pstate,
+ struct drm_crtc_state *cstate)
+{
+ const struct drm_display_mode *mode = &cstate->mode;
+ struct drm_framebuffer *old_fb = pipe->plane.state->fb;
+ struct drm_framebuffer *fb = pstate->fb;
+
+ if (fb) {
+ u32 offset = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+
+ /* FB base address must be dword aligned. */
+ if (offset & 3) {
+ DRM_DEBUG_KMS("FB not 32-bit aligned\n");
+ return -EINVAL;
+ }
+
+ /*
+ * There's no pitch register; the mode's hdisplay
+ * controls this.
+ */
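+ /*
+ * Example with assumed numbers: an 800 pixel wide mode
+ * scanning out XRGB8888 (cpp = 4) only passes this check
+ * with a pitch of exactly 800 * 4 = 3200 bytes.
+ */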
+ if (fb->pitches[0] != mode->hdisplay * fb->format->cpp[0]) {
+ DRM_DEBUG_KMS("can't handle pitches\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We can't change the FB format in a flicker-free
+ * manner (and only update it during CRTC enable).
+ */
+ if (old_fb && old_fb->format != fb->format)
+ cstate->mode_changed = true;
+ }
+
+ return 0;
+}
+
+static int mcde_configure_extsrc(struct mcde *mcde, enum mcde_extsrc src,
+ u32 format)
+{
+ u32 val;
+ u32 conf;
+ u32 cr;
+
+ switch (src) {
+ case MCDE_EXTSRC_0:
+ conf = MCDE_EXTSRC0CONF;
+ cr = MCDE_EXTSRC0CR;
+ break;
+ case MCDE_EXTSRC_1:
+ conf = MCDE_EXTSRC1CONF;
+ cr = MCDE_EXTSRC1CR;
+ break;
+ case MCDE_EXTSRC_2:
+ conf = MCDE_EXTSRC2CONF;
+ cr = MCDE_EXTSRC2CR;
+ break;
+ case MCDE_EXTSRC_3:
+ conf = MCDE_EXTSRC3CONF;
+ cr = MCDE_EXTSRC3CR;
+ break;
+ case MCDE_EXTSRC_4:
+ conf = MCDE_EXTSRC4CONF;
+ cr = MCDE_EXTSRC4CR;
+ break;
+ case MCDE_EXTSRC_5:
+ conf = MCDE_EXTSRC5CONF;
+ cr = MCDE_EXTSRC5CR;
+ break;
+ case MCDE_EXTSRC_6:
+ conf = MCDE_EXTSRC6CONF;
+ cr = MCDE_EXTSRC6CR;
+ break;
+ case MCDE_EXTSRC_7:
+ conf = MCDE_EXTSRC7CONF;
+ cr = MCDE_EXTSRC7CR;
+ break;
+ case MCDE_EXTSRC_8:
+ conf = MCDE_EXTSRC8CONF;
+ cr = MCDE_EXTSRC8CR;
+ break;
+ case MCDE_EXTSRC_9:
+ conf = MCDE_EXTSRC9CONF;
+ cr = MCDE_EXTSRC9CR;
+ break;
+ }
+
+ /*
+ * Configure external source 0 to use one buffer (buffer 0)
+ * and primary overlay ID 0.
+ * From mcde_hw.c ovly_update_registers() in the vendor tree
+ */
+ val = 0 << MCDE_EXTSRCXCONF_BUF_ID_SHIFT;
+ val |= 1 << MCDE_EXTSRCXCONF_BUF_NB_SHIFT;
+ val |= 0 << MCDE_EXTSRCXCONF_PRI_OVLID_SHIFT;
+ /*
+ * MCDE has inverse semantics from DRM on RGB/BGR, which is why
+ * all the modes are inverted here.
+ */
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ break;
+ case DRM_FORMAT_RGB888:
+ val |= MCDE_EXTSRCXCONF_BPP_RGB888 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
+ break;
+ case DRM_FORMAT_BGR888:
+ val |= MCDE_EXTSRCXCONF_BPP_RGB888 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ break;
+ case DRM_FORMAT_ARGB4444:
+ val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
+ break;
+ case DRM_FORMAT_ABGR4444:
+ val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ break;
+ case DRM_FORMAT_XRGB4444:
+ val |= MCDE_EXTSRCXCONF_BPP_RGB444 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
+ break;
+ case DRM_FORMAT_XBGR4444:
+ val |= MCDE_EXTSRCXCONF_BPP_RGB444 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
+ break;
+ case DRM_FORMAT_XBGR1555:
+ val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ break;
+ case DRM_FORMAT_RGB565:
+ val |= MCDE_EXTSRCXCONF_BPP_RGB565 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
+ break;
+ case DRM_FORMAT_BGR565:
+ val |= MCDE_EXTSRCXCONF_BPP_RGB565 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ break;
+ case DRM_FORMAT_YUV422:
+ val |= MCDE_EXTSRCXCONF_BPP_YCBCR422 <<
+ MCDE_EXTSRCXCONF_BPP_SHIFT;
+ break;
+ default:
+ dev_err(mcde->dev, "Unknown pixel format 0x%08x\n",
+ format);
+ return -EINVAL;
+ }
+ writel(val, mcde->regs + conf);
+
+ /* Software select, primary */
+ val = MCDE_EXTSRCXCR_SEL_MOD_SOFTWARE_SEL;
+ val |= MCDE_EXTSRCXCR_MULTIOVL_CTRL_PRIMARY;
+ writel(val, mcde->regs + cr);
+
+ return 0;
+}
+
+static void mcde_configure_overlay(struct mcde *mcde, enum mcde_overlay ovl,
+ enum mcde_extsrc src,
+ enum mcde_channel ch,
+ const struct drm_display_mode *mode,
+ u32 format)
+{
+ u32 val;
+ u32 conf1;
+ u32 conf2;
+ u32 crop;
+ u32 ljinc;
+ u32 cr;
+ u32 comp;
+
+ switch (ovl) {
+ case MCDE_OVERLAY_0:
+ conf1 = MCDE_OVL0CONF;
+ conf2 = MCDE_OVL0CONF2;
+ crop = MCDE_OVL0CROP;
+ ljinc = MCDE_OVL0LJINC;
+ cr = MCDE_OVL0CR;
+ comp = MCDE_OVL0COMP;
+ break;
+ case MCDE_OVERLAY_1:
+ conf1 = MCDE_OVL1CONF;
+ conf2 = MCDE_OVL1CONF2;
+ crop = MCDE_OVL1CROP;
+ ljinc = MCDE_OVL1LJINC;
+ cr = MCDE_OVL1CR;
+ comp = MCDE_OVL1COMP;
+ break;
+ case MCDE_OVERLAY_2:
+ conf1 = MCDE_OVL2CONF;
+ conf2 = MCDE_OVL2CONF2;
+ crop = MCDE_OVL2CROP;
+ ljinc = MCDE_OVL2LJINC;
+ cr = MCDE_OVL2CR;
+ comp = MCDE_OVL2COMP;
+ break;
+ case MCDE_OVERLAY_3:
+ conf1 = MCDE_OVL3CONF;
+ conf2 = MCDE_OVL3CONF2;
+ crop = MCDE_OVL3CROP;
+ ljinc = MCDE_OVL3LJINC;
+ cr = MCDE_OVL3CR;
+ comp = MCDE_OVL3COMP;
+ break;
+ case MCDE_OVERLAY_4:
+ conf1 = MCDE_OVL4CONF;
+ conf2 = MCDE_OVL4CONF2;
+ crop = MCDE_OVL4CROP;
+ ljinc = MCDE_OVL4LJINC;
+ cr = MCDE_OVL4CR;
+ comp = MCDE_OVL4COMP;
+ break;
+ case MCDE_OVERLAY_5:
+ conf1 = MCDE_OVL5CONF;
+ conf2 = MCDE_OVL5CONF2;
+ crop = MCDE_OVL5CROP;
+ ljinc = MCDE_OVL5LJINC;
+ cr = MCDE_OVL5CR;
+ comp = MCDE_OVL5COMP;
+ break;
+ }
+
+ val = mode->hdisplay << MCDE_OVLXCONF_PPL_SHIFT;
+ val |= mode->vdisplay << MCDE_OVLXCONF_LPF_SHIFT;
+ /* Use external source 0 that we just configured */
+ val |= src << MCDE_OVLXCONF_EXTSRC_ID_SHIFT;
+ writel(val, mcde->regs + conf1);
+
+ val = MCDE_OVLXCONF2_BP_PER_PIXEL_ALPHA;
+ val |= 0xff << MCDE_OVLXCONF2_ALPHAVALUE_SHIFT;
+ /* OPQ: overlay is opaque */
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ /* No OPQ */
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_YUV422:
+ val |= MCDE_OVLXCONF2_OPQ;
+ break;
+ default:
+ dev_err(mcde->dev, "Unknown pixel format 0x%08x\n",
+ format);
+ break;
+ }
+ /* The default watermark level for overlay 0 is 48 */
+ val |= 48 << MCDE_OVLXCONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT;
+ writel(val, mcde->regs + conf2);
+
+ /* Number of bytes to fetch per line */
+ writel(mcde->stride, mcde->regs + ljinc);
+ /* No cropping */
+ writel(0, mcde->regs + crop);
+
+ /* Set up overlay control register */
+ val = MCDE_OVLXCR_OVLEN;
+ val |= MCDE_OVLXCR_COLCCTRL_DISABLED;
+ val |= MCDE_OVLXCR_BURSTSIZE_8W <<
+ MCDE_OVLXCR_BURSTSIZE_SHIFT;
+ val |= MCDE_OVLXCR_MAXOUTSTANDING_8_REQ <<
+ MCDE_OVLXCR_MAXOUTSTANDING_SHIFT;
+ /* Not using rotation but set it up anyway */
+ val |= MCDE_OVLXCR_ROTBURSTSIZE_8W <<
+ MCDE_OVLXCR_ROTBURSTSIZE_SHIFT;
+ writel(val, mcde->regs + cr);
+
+ /*
+ * Set up the overlay compositor to route the overlay out to
+ * the desired channel
+ */
+ val = ch << MCDE_OVLXCOMP_CH_ID_SHIFT;
+ writel(val, mcde->regs + comp);
+}
+
+static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
+ enum mcde_fifo fifo,
+ const struct drm_display_mode *mode)
+{
+ u32 val;
+ u32 conf;
+ u32 sync;
+ u32 stat;
+ u32 bgcol;
+ u32 mux;
+
+ switch (ch) {
+ case MCDE_CHANNEL_0:
+ conf = MCDE_CHNL0CONF;
+ sync = MCDE_CHNL0SYNCHMOD;
+ stat = MCDE_CHNL0STAT;
+ bgcol = MCDE_CHNL0BCKGNDCOL;
+ mux = MCDE_CHNL0MUXING;
+ break;
+ case MCDE_CHANNEL_1:
+ conf = MCDE_CHNL1CONF;
+ sync = MCDE_CHNL1SYNCHMOD;
+ stat = MCDE_CHNL1STAT;
+ bgcol = MCDE_CHNL1BCKGNDCOL;
+ mux = MCDE_CHNL1MUXING;
+ break;
+ case MCDE_CHANNEL_2:
+ conf = MCDE_CHNL2CONF;
+ sync = MCDE_CHNL2SYNCHMOD;
+ stat = MCDE_CHNL2STAT;
+ bgcol = MCDE_CHNL2BCKGNDCOL;
+ mux = MCDE_CHNL2MUXING;
+ break;
+ case MCDE_CHANNEL_3:
+ conf = MCDE_CHNL3CONF;
+ sync = MCDE_CHNL3SYNCHMOD;
+ stat = MCDE_CHNL3STAT;
+ bgcol = MCDE_CHNL3BCKGNDCOL;
+ mux = MCDE_CHNL3MUXING;
+ break;
+ }
+
+ /* Set up channel 0 sync (based on chnl_update_registers()) */
+ if (mcde->te_sync) {
+ /*
+ * Turn on hardware TE0 synchronization
+ */
+ val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
+ << MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
+ val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
+ << MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
+ } else {
+ /*
+ * Set up sync source to software, out sync formatter
+ * Code mostly from mcde_hw.c chnl_update_registers()
+ */
+ val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SOFTWARE
+ << MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
+ val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
+ << MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
+ }
+ writel(val, mcde->regs + sync);
+
+ /* Set up pixels per line and lines per frame */
+ val = (mode->hdisplay - 1) << MCDE_CHNLXCONF_PPL_SHIFT;
+ val |= (mode->vdisplay - 1) << MCDE_CHNLXCONF_LPF_SHIFT;
+ writel(val, mcde->regs + conf);
+
+ /*
+ * Normalize color conversion:
+ * black background, OLED conversion disable on channel
+ */
+ val = MCDE_CHNLXSTAT_CHNLBLBCKGND_EN |
+ MCDE_CHNLXSTAT_CHNLRD;
+ writel(val, mcde->regs + stat);
+ writel(0, mcde->regs + bgcol);
+
+ /* Set up muxing: connect the channel to the desired FIFO */
+ switch (fifo) {
+ case MCDE_FIFO_A:
+ writel(MCDE_CHNLXMUXING_FIFO_ID_FIFO_A,
+ mcde->regs + mux);
+ break;
+ case MCDE_FIFO_B:
+ writel(MCDE_CHNLXMUXING_FIFO_ID_FIFO_B,
+ mcde->regs + mux);
+ break;
+ }
+}
+
+static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
+ enum mcde_dsi_formatter fmt,
+ int fifo_wtrmrk)
+{
+ u32 val;
+ u32 ctrl;
+ u32 cr0, cr1;
+
+ switch (fifo) {
+ case MCDE_FIFO_A:
+ ctrl = MCDE_CTRLA;
+ cr0 = MCDE_CRA0;
+ cr1 = MCDE_CRA1;
+ break;
+ case MCDE_FIFO_B:
+ ctrl = MCDE_CTRLB;
+ cr0 = MCDE_CRB0;
+ cr1 = MCDE_CRB1;
+ break;
+ }
+
+ val = fifo_wtrmrk << MCDE_CTRLX_FIFOWTRMRK_SHIFT;
+ /* We only support DSI formatting for now */
+ val |= MCDE_CTRLX_FORMTYPE_DSI <<
+ MCDE_CTRLX_FORMTYPE_SHIFT;
+
+ /* Select the formatter to use for this FIFO */
+ val |= fmt << MCDE_CTRLX_FORMID_SHIFT;
+ writel(val, mcde->regs + ctrl);
+
+ /* Blend source with Alpha 0xff on FIFO */
+ val = MCDE_CRX0_BLENDEN |
+ 0xff << MCDE_CRX0_ALPHABLEND_SHIFT;
+ writel(val, mcde->regs + cr0);
+
+ /* Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */
+
+ /* Use the MCDE clock for this FIFO */
+ val = MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT;
+
+ /* TODO: when adding DPI support add OUTBPP etc here */
+ writel(val, mcde->regs + cr1);
+}
+
+static void mcde_configure_dsi_formatter(struct mcde *mcde,
+ enum mcde_dsi_formatter fmt,
+ u32 formatter_frame,
+ int pkt_size)
+{
+ u32 val;
+ u32 conf0;
+ u32 frame;
+ u32 pkt;
+ u32 sync;
+ u32 cmdw;
+ u32 delay0, delay1;
+
+ switch (fmt) {
+ case MCDE_DSI_FORMATTER_0:
+ conf0 = MCDE_DSIVID0CONF0;
+ frame = MCDE_DSIVID0FRAME;
+ pkt = MCDE_DSIVID0PKT;
+ sync = MCDE_DSIVID0SYNC;
+ cmdw = MCDE_DSIVID0CMDW;
+ delay0 = MCDE_DSIVID0DELAY0;
+ delay1 = MCDE_DSIVID0DELAY1;
+ break;
+ case MCDE_DSI_FORMATTER_1:
+ conf0 = MCDE_DSIVID1CONF0;
+ frame = MCDE_DSIVID1FRAME;
+ pkt = MCDE_DSIVID1PKT;
+ sync = MCDE_DSIVID1SYNC;
+ cmdw = MCDE_DSIVID1CMDW;
+ delay0 = MCDE_DSIVID1DELAY0;
+ delay1 = MCDE_DSIVID1DELAY1;
+ break;
+ case MCDE_DSI_FORMATTER_2:
+ conf0 = MCDE_DSIVID2CONF0;
+ frame = MCDE_DSIVID2FRAME;
+ pkt = MCDE_DSIVID2PKT;
+ sync = MCDE_DSIVID2SYNC;
+ cmdw = MCDE_DSIVID2CMDW;
+ delay0 = MCDE_DSIVID2DELAY0;
+ delay1 = MCDE_DSIVID2DELAY1;
+ break;
+ }
+
+ /*
+ * Enable formatter
+ * 8 bit commands and DCS commands (notgen = not generic)
+ */
+ val = MCDE_DSICONF0_CMD8 | MCDE_DSICONF0_DCSVID_NOTGEN;
+ if (mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)
+ val |= MCDE_DSICONF0_VID_MODE_VID;
+ switch (mcde->mdsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ val |= MCDE_DSICONF0_PACKING_RGB888 <<
+ MCDE_DSICONF0_PACKING_SHIFT;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ val |= MCDE_DSICONF0_PACKING_RGB666 <<
+ MCDE_DSICONF0_PACKING_SHIFT;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ val |= MCDE_DSICONF0_PACKING_RGB666_PACKED <<
+ MCDE_DSICONF0_PACKING_SHIFT;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ val |= MCDE_DSICONF0_PACKING_RGB565 <<
+ MCDE_DSICONF0_PACKING_SHIFT;
+ break;
+ default:
+ dev_err(mcde->dev, "unknown DSI format\n");
+ return;
+ }
+ writel(val, mcde->regs + conf0);
+
+ writel(formatter_frame, mcde->regs + frame);
+ writel(pkt_size, mcde->regs + pkt);
+ writel(0, mcde->regs + sync);
+ /* Define the MIPI command: we want to write into display memory */
+ val = MIPI_DCS_WRITE_MEMORY_CONTINUE <<
+ MCDE_DSIVIDXCMDW_CMDW_CONTINUE_SHIFT;
+ val |= MIPI_DCS_WRITE_MEMORY_START <<
+ MCDE_DSIVIDXCMDW_CMDW_START_SHIFT;
+ writel(val, mcde->regs + cmdw);
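+ /*
+ * For reference: in the MIPI DCS standard these opcodes are
+ * MIPI_DCS_WRITE_MEMORY_START = 0x2c and
+ * MIPI_DCS_WRITE_MEMORY_CONTINUE = 0x3c, so the register ends
+ * up with 0x3c in the low halfword and 0x2c in the high one.
+ */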
+
+ /*
+ * FIXME: the vendor driver has some hack around this value in
+ * CMD mode with autotrig.
+ */
+ writel(0, mcde->regs + delay0);
+ writel(0, mcde->regs + delay1);
+}
+
+static void mcde_enable_fifo(struct mcde *mcde, enum mcde_fifo fifo)
+{
+ u32 val;
+ u32 cr;
+
+ switch (fifo) {
+ case MCDE_FIFO_A:
+ cr = MCDE_CRA0;
+ break;
+ case MCDE_FIFO_B:
+ cr = MCDE_CRB0;
+ break;
+ default:
+ dev_err(mcde->dev, "cannot enable FIFO %c\n",
+ 'A' + fifo);
+ return;
+ }
+
+ spin_lock(&mcde->flow_lock);
+ val = readl(mcde->regs + cr);
+ val |= MCDE_CRX0_FLOEN;
+ writel(val, mcde->regs + cr);
+ mcde->flow_active++;
+ spin_unlock(&mcde->flow_lock);
+}
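+
+/*
+ * Note: flow_active counts outstanding flow enables. In oneshot mode
+ * the TE0 IRQ handler in mcde_display_irq() decrements it and turns
+ * the flow off once it reaches zero, while mcde_disable_fifo() below
+ * unconditionally forces it to zero.
+ */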
+
+static void mcde_disable_fifo(struct mcde *mcde, enum mcde_fifo fifo,
+ bool wait_for_drain)
+{
+ int timeout = 100;
+ u32 val;
+ u32 cr;
+
+ switch (fifo) {
+ case MCDE_FIFO_A:
+ cr = MCDE_CRA0;
+ break;
+ case MCDE_FIFO_B:
+ cr = MCDE_CRB0;
+ break;
+ default:
+ dev_err(mcde->dev, "cannot disable FIFO %c\n",
+ 'A' + fifo);
+ return;
+ }
+
+ spin_lock(&mcde->flow_lock);
+ val = readl(mcde->regs + cr);
+ val &= ~MCDE_CRX0_FLOEN;
+ writel(val, mcde->regs + cr);
+ mcde->flow_active = 0;
+ spin_unlock(&mcde->flow_lock);
+
+ if (!wait_for_drain)
+ return;
+
+ /* Check that we really drained and stopped the flow */
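+ /* (at most ~150 ms: timeout = 100 iterations of 1-1.5 ms sleeps) */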
+ while (readl(mcde->regs + cr) & MCDE_CRX0_FLOEN) {
+ usleep_range(1000, 1500);
+ if (!--timeout) {
+ dev_err(mcde->dev,
+ "FIFO timeout while clearing FIFO %c\n",
+ 'A' + fifo);
+ return;
+ }
+ }
+}
+
+/*
+ * This drains a pipe, i.e. a FIFO connected to a certain channel.
+ */
+static void mcde_drain_pipe(struct mcde *mcde, enum mcde_fifo fifo,
+ enum mcde_channel ch)
+{
+ u32 val;
+ u32 ctrl;
+ u32 synsw;
+
+ switch (fifo) {
+ case MCDE_FIFO_A:
+ ctrl = MCDE_CTRLA;
+ break;
+ case MCDE_FIFO_B:
+ ctrl = MCDE_CTRLB;
+ break;
+ }
+
+ switch (ch) {
+ case MCDE_CHANNEL_0:
+ synsw = MCDE_CHNL0SYNCHSW;
+ break;
+ case MCDE_CHANNEL_1:
+ synsw = MCDE_CHNL1SYNCHSW;
+ break;
+ case MCDE_CHANNEL_2:
+ synsw = MCDE_CHNL2SYNCHSW;
+ break;
+ case MCDE_CHANNEL_3:
+ synsw = MCDE_CHNL3SYNCHSW;
+ break;
+ }
+
+ val = readl(mcde->regs + ctrl);
+ if (!(val & MCDE_CTRLX_FIFOEMPTY)) {
+ dev_err(mcde->dev, "Channel A FIFO not empty (handover)\n");
+ /* Attempt to clear the FIFO */
+ mcde_enable_fifo(mcde, fifo);
+ /* Trigger a software sync out on respective channel (0-3) */
+ writel(MCDE_CHNLXSYNCHSW_SW_TRIG, mcde->regs + synsw);
+ /* Disable FIFO A flow again */
+ mcde_disable_fifo(mcde, fifo, true);
+ }
+}
+
+static int mcde_dsi_get_pkt_div(int ppl, int fifo_size)
+{
+ /*
+ * DSI command mode line packets should be split evenly into packets
+ * smaller than or equal to the FIFO size.
+ */
+ int div;
+ const int max_div = DIV_ROUND_UP(MCDE_MAX_WIDTH, fifo_size);
+
+ for (div = 1; div < max_div; div++)
+ if (ppl % div == 0 && ppl / div <= fifo_size)
+ return div;
+ return 1;
+}
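+
+/*
+ * Worked example (illustrative numbers, not from the vendor code):
+ * for a 1440 pixel wide mode and a 640 entry FIFO, max_div is
+ * DIV_ROUND_UP(2048, 640) = 4, so we try div = 1 (1440 > 640),
+ * div = 2 (720 > 640) and div = 3 (1440 % 3 == 0 and 480 <= 640),
+ * splitting each line into three 480 pixel packets.
+ */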
+
+static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *cstate,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_device *drm = crtc->dev;
+ struct mcde *mcde = drm->dev_private;
+ const struct drm_display_mode *mode = &cstate->mode;
+ struct drm_framebuffer *fb = plane->state->fb;
+ u32 format = fb->format->format;
+ u32 formatter_ppl = mode->hdisplay; /* pixels per line */
+ u32 formatter_lpf = mode->vdisplay; /* lines per frame */
+ int pkt_size, fifo_wtrmrk;
+ int cpp = fb->format->cpp[0];
+ int formatter_cpp;
+ struct drm_format_name_buf tmp;
+ u32 formatter_frame;
+ u32 pkt_div;
+ u32 val;
+
+ dev_info(drm->dev, "enable MCDE, %d x %d format %s\n",
+ mode->hdisplay, mode->vdisplay,
+ drm_get_format_name(format, &tmp));
+ if (!mcde->mdsi) {
+ /* TODO: deal with this for non-DSI output */
+ dev_err(drm->dev, "no DSI master attached!\n");
+ return;
+ }
+
+ dev_info(drm->dev, "output in %s mode, format %dbpp\n",
+ (mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ?
+ "VIDEO" : "CMD",
+ mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format));
+ formatter_cpp =
+ mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format) / 8;
+ dev_info(drm->dev, "overlay CPP %d bytes, DSI CPP %d bytes\n",
+ cpp,
+ formatter_cpp);
+
+ /* Calculations from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */
+
+ /*
+ * Set up FIFO A watermark level:
+ * 128 for LCD 32bpp video mode
+ * 48 for LCD 32bpp command mode
+ * 128 for LCD 16bpp video mode
+ * 64 for LCD 16bpp command mode
+ * 128 for HDMI 32bpp
+ * 192 for HDMI 16bpp
+ */
+ fifo_wtrmrk = mode->hdisplay;
+ if (mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ fifo_wtrmrk = min(fifo_wtrmrk, 128);
+ pkt_div = 1;
+ } else {
+ fifo_wtrmrk = min(fifo_wtrmrk, 48);
+ /* The FIFO is 640 entries deep on this v3 hardware */
+ pkt_div = mcde_dsi_get_pkt_div(mode->hdisplay, 640);
+ }
+ dev_dbg(drm->dev, "FIFO watermark after flooring: %d bytes\n",
+ fifo_wtrmrk);
+ dev_dbg(drm->dev, "Packet divisor: %d bytes\n", pkt_div);
+
+ /* NOTE: pkt_div is 1 for video mode */
+ pkt_size = (formatter_ppl * formatter_cpp) / pkt_div;
+ /* Commands CMD8 need one extra byte */
+ if (!(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO))
+ pkt_size++;
+
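+ /*
+ * Example with assumed numbers: an 864 pixel line at RGB888
+ * (formatter_cpp = 3) gets pkt_div = 2, i.e. two 1296 byte
+ * packets per line, plus the one extra CMD8 byte in command
+ * mode for a pkt_size of 1297.
+ */
+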
+ dev_dbg(drm->dev, "DSI packet size: %d * %d bytes per line\n",
+ pkt_size, pkt_div);
+ dev_dbg(drm->dev, "Overlay frame size: %u bytes\n",
+ mode->hdisplay * mode->vdisplay * cpp);
+ mcde->stride = mode->hdisplay * cpp;
+ dev_dbg(drm->dev, "Overlay line stride: %u bytes\n",
+ mcde->stride);
+ /* NOTE: pkt_div is 1 for video mode */
+ formatter_frame = pkt_size * pkt_div * formatter_lpf;
+ dev_dbg(drm->dev, "Formatter frame size: %u bytes\n", formatter_frame);
+
+ /* Drain the FIFO A + channel 0 pipe so we have a clean slate */
+ mcde_drain_pipe(mcde, MCDE_FIFO_A, MCDE_CHANNEL_0);
+
+ /*
+ * We set up our display pipeline:
+ * EXTSRC 0 -> OVERLAY 0 -> CHANNEL 0 -> FIFO A -> DSI FORMATTER 0
+ *
+ * First configure the external source (memory) on external source 0
+ * using the desired bitstream/bitmap format
+ */
+ mcde_configure_extsrc(mcde, MCDE_EXTSRC_0, format);
+
+ /*
+ * Configure overlay 0 according to format and mode and take input
+ * from external source 0 and route the output of this overlay to
+ * channel 0
+ */
+ mcde_configure_overlay(mcde, MCDE_OVERLAY_0, MCDE_EXTSRC_0,
+ MCDE_CHANNEL_0, mode, format);
+
+ /*
+ * Configure pixel-per-line and line-per-frame for channel 0 and then
+ * route channel 0 to FIFO A
+ */
+ mcde_configure_channel(mcde, MCDE_CHANNEL_0, MCDE_FIFO_A, mode);
+
+ /* Configure FIFO A to use DSI formatter 0 */
+ mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0,
+ fifo_wtrmrk);
+
+ /* Configure the DSI formatter 0 for the DSI panel output */
+ mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0,
+ formatter_frame, pkt_size);
+
+ if (mcde->te_sync) {
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ val = MCDE_VSCRC_VSPOL;
+ else
+ val = 0;
+ writel(val, mcde->regs + MCDE_VSCRC0);
+ /* Enable VSYNC capture on TE0 */
+ val = readl(mcde->regs + MCDE_CRC);
+ val |= MCDE_CRC_SYCEN0;
+ writel(val, mcde->regs + MCDE_CRC);
+
+ drm_crtc_vblank_on(crtc);
+ }
+
+ dev_info(drm->dev, "MCDE display is enabled\n");
+}
+
+static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
+ struct mcde *mcde = drm->dev_private;
+
+ if (mcde->te_sync)
+ drm_crtc_vblank_off(crtc);
+
+ /* Disable FIFO A flow */
+ mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
+
+ dev_info(drm->dev, "MCDE display is disabled\n");
+}
+
+static void mcde_display_send_one_frame(struct mcde *mcde)
+{
+ /* Request a TE ACK */
+ if (mcde->te_sync)
+ mcde_dsi_te_request(mcde->mdsi);
+
+ /* Enable FIFO A flow */
+ mcde_enable_fifo(mcde, MCDE_FIFO_A);
+
+ if (mcde->te_sync) {
+ /*
+ * If oneshot mode is enabled, the flow will be disabled
+ * when the TE0 IRQ arrives in the interrupt handler. Otherwise
+ * updates are continuously streamed to the display after this
+ * point.
+ */
+ dev_dbg(mcde->dev, "sent TE0 framebuffer update\n");
+ return;
+ }
+
+ /* Trigger a software sync out on channel 0 */
+ writel(MCDE_CHNLXSYNCHSW_SW_TRIG,
+ mcde->regs + MCDE_CHNL0SYNCHSW);
+
+ /*
+ * Disable FIFO A flow again: since we are using software sync
+ * we need to wait for the FIFO to drain before we continue,
+ * so repeated calls to this function will not cause a mess
+ * in the hardware by pushing updates while updates are
+ * already going on.
+ */
+ mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
+
+ dev_dbg(mcde->dev, "sent SW framebuffer update\n");
+}
+
+static void mcde_set_extsrc(struct mcde *mcde, u32 buffer_address)
+{
+ /* Write bitmap base address to register */
+ writel(buffer_address, mcde->regs + MCDE_EXTSRCXA0);
+ /*
+ * Base address for the next line; this is probably only used
+ * in interlaced modes.
+ */
+ writel(buffer_address + mcde->stride, mcde->regs + MCDE_EXTSRCXA1);
+}
+
+static void mcde_display_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_pstate)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
+ struct mcde *mcde = drm->dev_private;
+ struct drm_pending_vblank_event *event = crtc->state->event;
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_plane_state *pstate = plane->state;
+ struct drm_framebuffer *fb = pstate->fb;
+
+ /*
+ * Handle any pending event first, we need to arm the vblank
+ * interrupt before sending any update to the display so we don't
+ * miss the interrupt.
+ */
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ /*
+ * Hardware must be on before we can arm any vblank event.
+ * This is not a scanout controller where there is always
+ * some periodic update going on; it is completely frozen
+ * until we get an update. If MCDE output isn't yet enabled,
+ * we just send a dummy vblank event back.
+ */
+ if (crtc->state->active && drm_crtc_vblank_get(crtc) == 0) {
+ dev_dbg(mcde->dev, "arm vblank event\n");
+ drm_crtc_arm_vblank_event(crtc, event);
+ } else {
+ dev_dbg(mcde->dev, "insert fake vblank event\n");
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+
+ /*
+ * We do not start sending framebuffer updates before the
+ * display is enabled. Update events will, however, be dispatched
+ * from the DRM core even before the display is enabled.
+ */
+ if (fb) {
+ mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
+ /* Send a single frame using software sync */
+ mcde_display_send_one_frame(mcde);
+ dev_info_once(mcde->dev, "sent first display update\n");
+ } else {
+ /*
+ * If an update is received before the MCDE is enabled
+ * (before mcde_display_enable() is called) we can't really
+ * do much with that buffer.
+ */
+ dev_info(mcde->dev, "ignored a display update\n");
+ }
+}
+
+static int mcde_display_enable_vblank(struct drm_simple_display_pipe *pipe)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
+ struct mcde *mcde = drm->dev_private;
+ u32 val;
+
+ /* Enable all VBLANK IRQs */
+ val = MCDE_PP_VCMPA |
+ MCDE_PP_VCMPB |
+ MCDE_PP_VSCC0 |
+ MCDE_PP_VSCC1 |
+ MCDE_PP_VCMPC0 |
+ MCDE_PP_VCMPC1;
+ writel(val, mcde->regs + MCDE_IMSCPP);
+
+ return 0;
+}
+
+static void mcde_display_disable_vblank(struct drm_simple_display_pipe *pipe)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
+ struct mcde *mcde = drm->dev_private;
+
+ /* Disable all VBLANK IRQs */
+ writel(0, mcde->regs + MCDE_IMSCPP);
+ /* Clear any pending IRQs */
+ writel(0xFFFFFFFF, mcde->regs + MCDE_RISPP);
+}
+
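+/*
+ * Note: the vblank callbacks above are only hooked into this struct
+ * when TE sync is in use; see mcde_display_init() below where they
+ * are assigned at runtime.
+ */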
+static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
+ .check = mcde_display_check,
+ .enable = mcde_display_enable,
+ .disable = mcde_display_disable,
+ .update = mcde_display_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+int mcde_display_init(struct drm_device *drm)
+{
+ struct mcde *mcde = drm->dev_private;
+ int ret;
+ static const u32 formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ /* These are actually IRGB1555, so the intensity bit is lost */
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_YUV422,
+ };
+
+ /* Provide vblank only when we have TE enabled */
+ if (mcde->te_sync) {
+ mcde_display_funcs.enable_vblank = mcde_display_enable_vblank;
+ mcde_display_funcs.disable_vblank = mcde_display_disable_vblank;
+ }
+
+ ret = drm_simple_display_pipe_init(drm, &mcde->pipe,
+ &mcde_display_funcs,
+ formats, ARRAY_SIZE(formats),
+ NULL,
+ mcde->connector);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mcde_display_init);
diff --git a/drivers/gpu/drm/mcde/mcde_display_regs.h b/drivers/gpu/drm/mcde/mcde_display_regs.h
new file mode 100644
index 000000000000..d3ac7ef5ff9a
--- /dev/null
+++ b/drivers/gpu/drm/mcde/mcde_display_regs.h
@@ -0,0 +1,518 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRM_MCDE_DISPLAY_REGS
+#define __DRM_MCDE_DISPLAY_REGS
+
+/* PP (pixel processor) interrupts */
+#define MCDE_IMSCPP 0x00000104
+#define MCDE_RISPP 0x00000114
+#define MCDE_MISPP 0x00000124
+#define MCDE_SISPP 0x00000134
+
+#define MCDE_PP_VCMPA BIT(0)
+#define MCDE_PP_VCMPB BIT(1)
+#define MCDE_PP_VSCC0 BIT(2)
+#define MCDE_PP_VSCC1 BIT(3)
+#define MCDE_PP_VCMPC0 BIT(4)
+#define MCDE_PP_VCMPC1 BIT(5)
+#define MCDE_PP_ROTFD_A BIT(6)
+#define MCDE_PP_ROTFD_B BIT(7)
+
+/* Overlay interrupts */
+#define MCDE_IMSCOVL 0x00000108
+#define MCDE_RISOVL 0x00000118
+#define MCDE_MISOVL 0x00000128
+#define MCDE_SISOVL 0x00000138
+
+/* Channel interrupts */
+#define MCDE_IMSCCHNL 0x0000010C
+#define MCDE_RISCHNL 0x0000011C
+#define MCDE_MISCHNL 0x0000012C
+#define MCDE_SISCHNL 0x0000013C
+
+/* X = 0..9 */
+#define MCDE_EXTSRCXA0 0x00000200
+#define MCDE_EXTSRCXA0_GROUPOFFSET 0x20
+#define MCDE_EXTSRCXA0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRCXA0_BASEADDRESS0_MASK 0xFFFFFFF8
+
+#define MCDE_EXTSRCXA1 0x00000204
+#define MCDE_EXTSRCXA1_GROUPOFFSET 0x20
+#define MCDE_EXTSRCXA1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRCXA1_BASEADDRESS1_MASK 0xFFFFFFF8
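+
+/*
+ * The per-source registers repeat every GROUPOFFSET bytes, so a
+ * hypothetical helper macro (not part of this header) for external
+ * source i could look like:
+ *
+ * #define MCDE_EXTSRCXA0_REG(i) \
+ *         (MCDE_EXTSRCXA0 + (i) * MCDE_EXTSRCXA0_GROUPOFFSET)
+ */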
+
+/* External sources 0..9 */
+#define MCDE_EXTSRC0CONF 0x0000020C
+#define MCDE_EXTSRC1CONF 0x0000022C
+#define MCDE_EXTSRC2CONF 0x0000024C
+#define MCDE_EXTSRC3CONF 0x0000026C
+#define MCDE_EXTSRC4CONF 0x0000028C
+#define MCDE_EXTSRC5CONF 0x000002AC
+#define MCDE_EXTSRC6CONF 0x000002CC
+#define MCDE_EXTSRC7CONF 0x000002EC
+#define MCDE_EXTSRC8CONF 0x0000030C
+#define MCDE_EXTSRC9CONF 0x0000032C
+#define MCDE_EXTSRCXCONF_GROUPOFFSET 0x20
+#define MCDE_EXTSRCXCONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRCXCONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRCXCONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRCXCONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRCXCONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRCXCONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRCXCONF_BPP_SHIFT 8
+#define MCDE_EXTSRCXCONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRCXCONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRCXCONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRCXCONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRCXCONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRCXCONF_BPP_RGB444 4
+#define MCDE_EXTSRCXCONF_BPP_ARGB4444 5
+#define MCDE_EXTSRCXCONF_BPP_IRGB1555 6
+#define MCDE_EXTSRCXCONF_BPP_RGB565 7
+#define MCDE_EXTSRCXCONF_BPP_RGB888 8
+#define MCDE_EXTSRCXCONF_BPP_XRGB8888 9
+#define MCDE_EXTSRCXCONF_BPP_ARGB8888 10
+#define MCDE_EXTSRCXCONF_BPP_YCBCR422 11
+#define MCDE_EXTSRCXCONF_BGR BIT(12)
+#define MCDE_EXTSRCXCONF_BEBO BIT(13)
+#define MCDE_EXTSRCXCONF_BEPO BIT(14)
+#define MCDE_EXTSRCXCONF_TUNNELING_BUFFER_HEIGHT_SHIFT 16
+#define MCDE_EXTSRCXCONF_TUNNELING_BUFFER_HEIGHT_MASK 0x0FFF0000
+
+/* External sources 0..9 */
+#define MCDE_EXTSRC0CR 0x00000210
+#define MCDE_EXTSRC1CR 0x00000230
+#define MCDE_EXTSRC2CR 0x00000250
+#define MCDE_EXTSRC3CR 0x00000270
+#define MCDE_EXTSRC4CR 0x00000290
+#define MCDE_EXTSRC5CR 0x000002B0
+#define MCDE_EXTSRC6CR 0x000002D0
+#define MCDE_EXTSRC7CR 0x000002F0
+#define MCDE_EXTSRC8CR 0x00000310
+#define MCDE_EXTSRC9CR 0x00000330
+#define MCDE_EXTSRCXCR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRCXCR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRCXCR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRCXCR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRCXCR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRCXCR_MULTIOVL_CTRL_PRIMARY BIT(2) /* 0 = all */
+#define MCDE_EXTSRCXCR_FS_DIV_DISABLE BIT(3)
+#define MCDE_EXTSRCXCR_FORCE_FS_DIV BIT(4)
+
+/* Only external source 6 has a second address register */
+#define MCDE_EXTSRC6A2 0x000002C8
+
+/* 6 overlays */
+#define MCDE_OVL0CR 0x00000400
+#define MCDE_OVL1CR 0x00000420
+#define MCDE_OVL2CR 0x00000440
+#define MCDE_OVL3CR 0x00000460
+#define MCDE_OVL4CR 0x00000480
+#define MCDE_OVL5CR 0x000004A0
+#define MCDE_OVLXCR_OVLEN BIT(0)
+#define MCDE_OVLXCR_COLCCTRL_DISABLED 0
+#define MCDE_OVLXCR_COLCCTRL_ENABLED_NO_SAT (1 << 1)
+#define MCDE_OVLXCR_COLCCTRL_ENABLED_SAT (2 << 1)
+#define MCDE_OVLXCR_CKEYGEN BIT(3)
+#define MCDE_OVLXCR_ALPHAPMEN BIT(4)
+#define MCDE_OVLXCR_OVLF BIT(5)
+#define MCDE_OVLXCR_OVLR BIT(6)
+#define MCDE_OVLXCR_OVLB BIT(7)
+#define MCDE_OVLXCR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVLXCR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVLXCR_STBPRIO_SHIFT 16
+#define MCDE_OVLXCR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVLXCR_BURSTSIZE_SHIFT 20
+#define MCDE_OVLXCR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVLXCR_BURSTSIZE_1W 0
+#define MCDE_OVLXCR_BURSTSIZE_2W 1
+#define MCDE_OVLXCR_BURSTSIZE_4W 2
+#define MCDE_OVLXCR_BURSTSIZE_8W 3
+#define MCDE_OVLXCR_BURSTSIZE_16W 4
+#define MCDE_OVLXCR_BURSTSIZE_HW_1W 8
+#define MCDE_OVLXCR_BURSTSIZE_HW_2W 9
+#define MCDE_OVLXCR_BURSTSIZE_HW_4W 10
+#define MCDE_OVLXCR_BURSTSIZE_HW_8W 11
+#define MCDE_OVLXCR_BURSTSIZE_HW_16W 12
+#define MCDE_OVLXCR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVLXCR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVLXCR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVLXCR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVLXCR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVLXCR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVLXCR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVLXCR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVLXCR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVLXCR_ROTBURSTSIZE_1W 0
+#define MCDE_OVLXCR_ROTBURSTSIZE_2W 1
+#define MCDE_OVLXCR_ROTBURSTSIZE_4W 2
+#define MCDE_OVLXCR_ROTBURSTSIZE_8W 3
+#define MCDE_OVLXCR_ROTBURSTSIZE_16W 4
+#define MCDE_OVLXCR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVLXCR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVLXCR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVLXCR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVLXCR_ROTBURSTSIZE_HW_16W 12
+
+#define MCDE_OVL0CONF 0x00000404
+#define MCDE_OVL1CONF 0x00000424
+#define MCDE_OVL2CONF 0x00000444
+#define MCDE_OVL3CONF 0x00000464
+#define MCDE_OVL4CONF 0x00000484
+#define MCDE_OVL5CONF 0x000004A4
+#define MCDE_OVLXCONF_PPL_SHIFT 0
+#define MCDE_OVLXCONF_PPL_MASK 0x000007FF
+#define MCDE_OVLXCONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVLXCONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVLXCONF_LPF_SHIFT 16
+#define MCDE_OVLXCONF_LPF_MASK 0x07FF0000
+
+#define MCDE_OVL0CONF2 0x00000408
+#define MCDE_OVL1CONF2 0x00000428
+#define MCDE_OVL2CONF2 0x00000448
+#define MCDE_OVL3CONF2 0x00000468
+#define MCDE_OVL4CONF2 0x00000488
+#define MCDE_OVL5CONF2 0x000004A8
+#define MCDE_OVLXCONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVLXCONF2_BP_CONSTANT_ALPHA BIT(0)
+#define MCDE_OVLXCONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVLXCONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVLXCONF2_OPQ BIT(9)
+#define MCDE_OVLXCONF2_PIXOFF_SHIFT 10
+#define MCDE_OVLXCONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVLXCONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVLXCONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+
+#define MCDE_OVL0LJINC 0x0000040C
+#define MCDE_OVL1LJINC 0x0000042C
+#define MCDE_OVL2LJINC 0x0000044C
+#define MCDE_OVL3LJINC 0x0000046C
+#define MCDE_OVL4LJINC 0x0000048C
+#define MCDE_OVL5LJINC 0x000004AC
+
+#define MCDE_OVL0CROP 0x00000410
+#define MCDE_OVL1CROP 0x00000430
+#define MCDE_OVL2CROP 0x00000450
+#define MCDE_OVL3CROP 0x00000470
+#define MCDE_OVL4CROP 0x00000490
+#define MCDE_OVL5CROP 0x000004B0
+#define MCDE_OVLXCROP_TMRGN_SHIFT 0
+#define MCDE_OVLXCROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVLXCROP_LMRGN_SHIFT 22
+#define MCDE_OVLXCROP_LMRGN_MASK 0xFFC00000
+
+#define MCDE_OVL0COMP 0x00000414
+#define MCDE_OVL1COMP 0x00000434
+#define MCDE_OVL2COMP 0x00000454
+#define MCDE_OVL3COMP 0x00000474
+#define MCDE_OVL4COMP 0x00000494
+#define MCDE_OVL5COMP 0x000004B4
+#define MCDE_OVLXCOMP_XPOS_SHIFT 0
+#define MCDE_OVLXCOMP_XPOS_MASK 0x000007FF
+#define MCDE_OVLXCOMP_CH_ID_SHIFT 11
+#define MCDE_OVLXCOMP_CH_ID_MASK 0x00007800
+#define MCDE_OVLXCOMP_YPOS_SHIFT 16
+#define MCDE_OVLXCOMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVLXCOMP_Z_SHIFT 27
+#define MCDE_OVLXCOMP_Z_MASK 0x78000000
+
+#define MCDE_CRC 0x00000C00
+#define MCDE_CRC_C1EN BIT(2)
+#define MCDE_CRC_C2EN BIT(3)
+#define MCDE_CRC_SYCEN0 BIT(7)
+#define MCDE_CRC_SYCEN1 BIT(8)
+#define MCDE_CRC_SIZE1 BIT(9)
+#define MCDE_CRC_SIZE2 BIT(10)
+#define MCDE_CRC_YUVCONVC1EN BIT(15)
+#define MCDE_CRC_CS1EN BIT(16)
+#define MCDE_CRC_CS2EN BIT(17)
+#define MCDE_CRC_CS1POL BIT(19)
+#define MCDE_CRC_CS2POL BIT(20)
+#define MCDE_CRC_CD1POL BIT(21)
+#define MCDE_CRC_CD2POL BIT(22)
+#define MCDE_CRC_WR1POL BIT(23)
+#define MCDE_CRC_WR2POL BIT(24)
+#define MCDE_CRC_RD1POL BIT(25)
+#define MCDE_CRC_RD2POL BIT(26)
+#define MCDE_CRC_SYNCCTRL_SHIFT 29
+#define MCDE_CRC_SYNCCTRL_MASK 0x60000000
+#define MCDE_CRC_SYNCCTRL_NO_SYNC 0
+#define MCDE_CRC_SYNCCTRL_DBI0 1
+#define MCDE_CRC_SYNCCTRL_DBI1 2
+#define MCDE_CRC_SYNCCTRL_PING_PONG 3
+#define MCDE_CRC_CLAMPC1EN BIT(31)
+
+#define MCDE_VSCRC0 0x00000C5C
+#define MCDE_VSCRC1 0x00000C60
+#define MCDE_VSCRC_VSPMIN_MASK 0x00000FFF
+#define MCDE_VSCRC_VSPMAX_SHIFT 12
+#define MCDE_VSCRC_VSPMAX_MASK 0x00FFF000
+#define MCDE_VSCRC_VSPDIV_SHIFT 24
+#define MCDE_VSCRC_VSPDIV_MASK 0x07000000
+#define MCDE_VSCRC_VSPDIV_MCDECLK_DIV_1 0
+#define MCDE_VSCRC_VSPDIV_MCDECLK_DIV_2 1
+#define MCDE_VSCRC_VSPDIV_MCDECLK_DIV_4 2
+#define MCDE_VSCRC_VSPDIV_MCDECLK_DIV_8 3
+#define MCDE_VSCRC_VSPDIV_MCDECLK_DIV_16 4
+#define MCDE_VSCRC_VSPDIV_MCDECLK_DIV_32 5
+#define MCDE_VSCRC_VSPDIV_MCDECLK_DIV_64 6
+#define MCDE_VSCRC_VSPDIV_MCDECLK_DIV_128 7
+#define MCDE_VSCRC_VSPOL BIT(27) /* 0 active high, 1 active low */
+#define MCDE_VSCRC_VSSEL BIT(28) /* 0 VSYNC0, 1 VSYNC1 */
+#define MCDE_VSCRC_VSDBL BIT(29)
+
+/* Channel config 0..3 */
+#define MCDE_CHNL0CONF 0x00000600
+#define MCDE_CHNL1CONF 0x00000620
+#define MCDE_CHNL2CONF 0x00000640
+#define MCDE_CHNL3CONF 0x00000660
+#define MCDE_CHNLXCONF_PPL_SHIFT 0
+#define MCDE_CHNLXCONF_PPL_MASK 0x000007FF
+#define MCDE_CHNLXCONF_LPF_SHIFT 16
+#define MCDE_CHNLXCONF_LPF_MASK 0x07FF0000
+#define MCDE_MAX_WIDTH 2048
+
+/* Channel status 0..3 */
+#define MCDE_CHNL0STAT 0x00000604
+#define MCDE_CHNL1STAT 0x00000624
+#define MCDE_CHNL2STAT 0x00000644
+#define MCDE_CHNL3STAT 0x00000664
+#define MCDE_CHNLXSTAT_CHNLRD BIT(0)
+#define MCDE_CHNLXSTAT_CHNLA BIT(1)
+#define MCDE_CHNLXSTAT_CHNLBLBCKGND_EN BIT(16)
+#define MCDE_CHNLXSTAT_PPLX2_V422 BIT(17)
+#define MCDE_CHNLXSTAT_LPFX2_V422 BIT(18)
+
+/* Sync settings for channel 0..3 */
+#define MCDE_CHNL0SYNCHMOD 0x00000608
+#define MCDE_CHNL1SYNCHMOD 0x00000628
+#define MCDE_CHNL2SYNCHMOD 0x00000648
+#define MCDE_CHNL3SYNCHMOD 0x00000668
+
+#define MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNLXSYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE 0
+#define MCDE_CHNLXSYNCHMOD_SRC_SYNCH_NO_SYNCH 1
+#define MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0 1
+#define MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE1 2
+
+/* Software sync triggers for channel 0..3 */
+#define MCDE_CHNL0SYNCHSW 0x0000060C
+#define MCDE_CHNL1SYNCHSW 0x0000062C
+#define MCDE_CHNL2SYNCHSW 0x0000064C
+#define MCDE_CHNL3SYNCHSW 0x0000066C
+#define MCDE_CHNLXSYNCHSW_SW_TRIG BIT(0)
+
+#define MCDE_CHNL0BCKGNDCOL 0x00000610
+#define MCDE_CHNL1BCKGNDCOL 0x00000630
+#define MCDE_CHNL2BCKGNDCOL 0x00000650
+#define MCDE_CHNL3BCKGNDCOL 0x00000670
+#define MCDE_CHNLXBCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNLXBCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNLXBCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNLXBCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNLXBCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNLXBCKGNDCOL_R_MASK 0x00FF0000
+
+#define MCDE_CHNL0MUXING 0x00000614
+#define MCDE_CHNL1MUXING 0x00000634
+#define MCDE_CHNL2MUXING 0x00000654
+#define MCDE_CHNL3MUXING 0x00000674
+#define MCDE_CHNLXMUXING_FIFO_ID_FIFO_A 0
+#define MCDE_CHNLXMUXING_FIFO_ID_FIFO_B 1
+#define MCDE_CHNLXMUXING_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNLXMUXING_FIFO_ID_FIFO_C1 3
+
+/* Pixel processing control registers for channel A B, */
+#define MCDE_CRA0 0x00000800
+#define MCDE_CRB0 0x00000A00
+#define MCDE_CRX0_FLOEN BIT(0)
+#define MCDE_CRX0_POWEREN BIT(1)
+#define MCDE_CRX0_BLENDEN BIT(2)
+#define MCDE_CRX0_AFLICKEN BIT(3)
+#define MCDE_CRX0_PALEN BIT(4)
+#define MCDE_CRX0_DITHEN BIT(5)
+#define MCDE_CRX0_GAMEN BIT(6)
+#define MCDE_CRX0_KEYCTRL_SHIFT 7
+#define MCDE_CRX0_KEYCTRL_MASK 0x00000380
+#define MCDE_CRX0_KEYCTRL_OFF 0
+#define MCDE_CRX0_KEYCTRL_ALPHA_RGB 1
+#define MCDE_CRX0_KEYCTRL_RGB 2
+#define MCDE_CRX0_KEYCTRL_FALPHA_FRGB 4
+#define MCDE_CRX0_KEYCTRL_FRGB 5
+#define MCDE_CRX0_BLENDCTRL BIT(10)
+#define MCDE_CRX0_FLICKMODE_SHIFT 11
+#define MCDE_CRX0_FLICKMODE_MASK 0x00001800
+#define MCDE_CRX0_FLICKMODE_FORCE_FILTER_0 0
+#define MCDE_CRX0_FLICKMODE_ADAPTIVE 1
+#define MCDE_CRX0_FLICKMODE_TEST_MODE 2
+#define MCDE_CRX0_FLOCKFORMAT_RGB BIT(13) /* 0 = YCVCR */
+#define MCDE_CRX0_PALMODE_GAMMA BIT(14) /* 0 = palette */
+#define MCDE_CRX0_OLEDEN BIT(15)
+#define MCDE_CRX0_ALPHABLEND_SHIFT 16
+#define MCDE_CRX0_ALPHABLEND_MASK 0x00FF0000
+#define MCDE_CRX0_ROTEN BIT(24)
+
+#define MCDE_CRA1 0x00000804
+#define MCDE_CRB1 0x00000A04
+#define MCDE_CRX1_PCD_SHIFT 0
+#define MCDE_CRX1_PCD_MASK 0x000003FF
+#define MCDE_CRX1_CLKSEL_SHIFT 10
+#define MCDE_CRX1_CLKSEL_MASK 0x00001C00
+#define MCDE_CRX1_CLKSEL_CLKPLL72 0
+#define MCDE_CRX1_CLKSEL_CLKPLL27 2
+#define MCDE_CRX1_CLKSEL_TV1CLK 3
+#define MCDE_CRX1_CLKSEL_TV2CLK 4
+#define MCDE_CRX1_CLKSEL_MCDECLK 5
+#define MCDE_CRX1_CDWIN_SHIFT 13
+#define MCDE_CRX1_CDWIN_MASK 0x0001E000
+#define MCDE_CRX1_CDWIN_8BPP_C1 0
+#define MCDE_CRX1_CDWIN_12BPP_C1 1
+#define MCDE_CRX1_CDWIN_12BPP_C2 2
+#define MCDE_CRX1_CDWIN_16BPP_C1 3
+#define MCDE_CRX1_CDWIN_16BPP_C2 4
+#define MCDE_CRX1_CDWIN_16BPP_C3 5
+#define MCDE_CRX1_CDWIN_18BPP_C1 6
+#define MCDE_CRX1_CDWIN_18BPP_C2 7
+#define MCDE_CRX1_CDWIN_24BPP 8
+#define MCDE_CRX1_OUTBPP_SHIFT 25
+#define MCDE_CRX1_OUTBPP_MASK 0x1E000000
+#define MCDE_CRX1_OUTBPP_MONO1 0
+#define MCDE_CRX1_OUTBPP_MONO2 1
+#define MCDE_CRX1_OUTBPP_MONO4 2
+#define MCDE_CRX1_OUTBPP_MONO8 3
+#define MCDE_CRX1_OUTBPP_8BPP 4
+#define MCDE_CRX1_OUTBPP_12BPP 5
+#define MCDE_CRX1_OUTBPP_15BPP 6
+#define MCDE_CRX1_OUTBPP_16BPP 7
+#define MCDE_CRX1_OUTBPP_18BPP 8
+#define MCDE_CRX1_OUTBPP_24BPP 9
+#define MCDE_CRX1_BCD BIT(29)
+#define MCDE_CRA1_CLKTYPE_TVXCLKSEL1 BIT(30) /* 0 = TVXCLKSEL1 */
+
+#define MCDE_COLKEYA 0x00000808
+#define MCDE_COLKEYB 0x00000A08
+
+#define MCDE_FCOLKEYA 0x0000080C
+#define MCDE_FCOLKEYB 0x00000A0C
+
+#define MCDE_RGBCONV1A 0x00000810
+#define MCDE_RGBCONV1B 0x00000A10
+
+#define MCDE_RGBCONV2A 0x00000814
+#define MCDE_RGBCONV2B 0x00000A14
+
+#define MCDE_RGBCONV3A 0x00000818
+#define MCDE_RGBCONV3B 0x00000A18
+
+#define MCDE_RGBCONV4A 0x0000081C
+#define MCDE_RGBCONV4B 0x00000A1C
+
+#define MCDE_RGBCONV5A 0x00000820
+#define MCDE_RGBCONV5B 0x00000A20
+
+#define MCDE_RGBCONV6A 0x00000824
+#define MCDE_RGBCONV6B 0x00000A24
+
+/* Rotation */
+#define MCDE_ROTACONF 0x0000087C
+#define MCDE_ROTBCONF 0x00000A7C
+
+#define MCDE_SYNCHCONFA 0x00000880
+#define MCDE_SYNCHCONFB 0x00000A80
+
+/* Channel A+B control registers */
+#define MCDE_CTRLA 0x00000884
+#define MCDE_CTRLB 0x00000A84
+#define MCDE_CTRLX_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLX_FIFOWTRMRK_MASK 0x000003FF
+#define MCDE_CTRLX_FIFOEMPTY BIT(12)
+#define MCDE_CTRLX_FIFOFULL BIT(13)
+#define MCDE_CTRLX_FORMID_SHIFT 16
+#define MCDE_CTRLX_FORMID_MASK 0x00070000
+#define MCDE_CTRLX_FORMID_DSI0VID 0
+#define MCDE_CTRLX_FORMID_DSI0CMD 1
+#define MCDE_CTRLX_FORMID_DSI1VID 2
+#define MCDE_CTRLX_FORMID_DSI1CMD 3
+#define MCDE_CTRLX_FORMID_DSI2VID 4
+#define MCDE_CTRLX_FORMID_DSI2CMD 5
+#define MCDE_CTRLX_FORMID_DPIA 0
+#define MCDE_CTRLX_FORMID_DPIB 1
+#define MCDE_CTRLX_FORMTYPE_SHIFT 20
+#define MCDE_CTRLX_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLX_FORMTYPE_DPITV 0
+#define MCDE_CTRLX_FORMTYPE_DBI 1
+#define MCDE_CTRLX_FORMTYPE_DSI 2
+
+#define MCDE_DSIVID0CONF0 0x00000E00
+#define MCDE_DSICMD0CONF0 0x00000E20
+#define MCDE_DSIVID1CONF0 0x00000E40
+#define MCDE_DSICMD1CONF0 0x00000E60
+#define MCDE_DSIVID2CONF0 0x00000E80
+#define MCDE_DSICMD2CONF0 0x00000EA0
+#define MCDE_DSICONF0_BLANKING_SHIFT 0
+#define MCDE_DSICONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSICONF0_VID_MODE_CMD 0
+#define MCDE_DSICONF0_VID_MODE_VID BIT(12)
+#define MCDE_DSICONF0_CMD8 BIT(13)
+#define MCDE_DSICONF0_BIT_SWAP BIT(16)
+#define MCDE_DSICONF0_BYTE_SWAP BIT(17)
+#define MCDE_DSICONF0_DCSVID_NOTGEN BIT(18)
+#define MCDE_DSICONF0_PACKING_SHIFT 20
+#define MCDE_DSICONF0_PACKING_MASK 0x00700000
+#define MCDE_DSICONF0_PACKING_RGB565 0
+#define MCDE_DSICONF0_PACKING_RGB666 1
+#define MCDE_DSICONF0_PACKING_RGB666_PACKED 2
+#define MCDE_DSICONF0_PACKING_RGB888 3
+#define MCDE_DSICONF0_PACKING_HDTV 4
+
+#define MCDE_DSIVID0FRAME 0x00000E04
+#define MCDE_DSICMD0FRAME 0x00000E24
+#define MCDE_DSIVID1FRAME 0x00000E44
+#define MCDE_DSICMD1FRAME 0x00000E64
+#define MCDE_DSIVID2FRAME 0x00000E84
+#define MCDE_DSICMD2FRAME 0x00000EA4
+
+#define MCDE_DSIVID0PKT 0x00000E08
+#define MCDE_DSICMD0PKT 0x00000E28
+#define MCDE_DSIVID1PKT 0x00000E48
+#define MCDE_DSICMD1PKT 0x00000E68
+#define MCDE_DSIVID2PKT 0x00000E88
+#define MCDE_DSICMD2PKT 0x00000EA8
+
+#define MCDE_DSIVID0SYNC 0x00000E0C
+#define MCDE_DSICMD0SYNC 0x00000E2C
+#define MCDE_DSIVID1SYNC 0x00000E4C
+#define MCDE_DSICMD1SYNC 0x00000E6C
+#define MCDE_DSIVID2SYNC 0x00000E8C
+#define MCDE_DSICMD2SYNC 0x00000EAC
+
+#define MCDE_DSIVID0CMDW 0x00000E10
+#define MCDE_DSICMD0CMDW 0x00000E30
+#define MCDE_DSIVID1CMDW 0x00000E50
+#define MCDE_DSICMD1CMDW 0x00000E70
+#define MCDE_DSIVID2CMDW 0x00000E90
+#define MCDE_DSICMD2CMDW 0x00000EB0
+#define MCDE_DSIVIDXCMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSIVIDXCMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSIVIDXCMDW_CMDW_START_SHIFT 16
+#define MCDE_DSIVIDXCMDW_CMDW_START_MASK 0xFFFF0000
+
+#define MCDE_DSIVID0DELAY0 0x00000E14
+#define MCDE_DSICMD0DELAY0 0x00000E34
+#define MCDE_DSIVID1DELAY0 0x00000E54
+#define MCDE_DSICMD1DELAY0 0x00000E74
+#define MCDE_DSIVID2DELAY0 0x00000E94
+#define MCDE_DSICMD2DELAY0 0x00000EB4
+
+#define MCDE_DSIVID0DELAY1 0x00000E18
+#define MCDE_DSICMD0DELAY1 0x00000E38
+#define MCDE_DSIVID1DELAY1 0x00000E58
+#define MCDE_DSICMD1DELAY1 0x00000E78
+#define MCDE_DSIVID2DELAY1 0x00000E98
+#define MCDE_DSICMD2DELAY1 0x00000EB8
+
+#endif /* __DRM_MCDE_DISPLAY_REGS */
diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h
new file mode 100644
index 000000000000..dab4db021231
--- /dev/null
+++ b/drivers/gpu/drm/mcde/mcde_drm.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Parts of this file were based on the MCDE driver by Marcus Lorentzon
+ * (C) ST-Ericsson SA 2013
+ */
+#include <drm/drm_simple_kms_helper.h>
+
+#ifndef _MCDE_DRM_H_
+#define _MCDE_DRM_H_
+
+struct mcde {
+ struct drm_device drm;
+ struct device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ struct drm_connector *connector;
+ struct drm_simple_display_pipe pipe;
+ struct mipi_dsi_device *mdsi;
+ s16 stride;
+ bool te_sync;
+ bool oneshot_mode;
+ unsigned int flow_active;
+ spinlock_t flow_lock; /* Locks the channel flow control */
+
+ void __iomem *regs;
+
+ struct clk *mcde_clk;
+ struct clk *lcd_clk;
+ struct clk *hdmi_clk;
+
+ struct regulator *epod;
+ struct regulator *vana;
+};
+
+bool mcde_dsi_irq(struct mipi_dsi_device *mdsi);
+void mcde_dsi_te_request(struct mipi_dsi_device *mdsi);
+extern struct platform_driver mcde_dsi_driver;
+
+void mcde_display_irq(struct mcde *mcde);
+void mcde_display_disable_irqs(struct mcde *mcde);
+int mcde_display_init(struct drm_device *drm);
+
+#endif /* _MCDE_DRM_H_ */
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
new file mode 100644
index 000000000000..baf63fb6850a
--- /dev/null
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Parts of this file were based on the MCDE driver by Marcus Lorentzon
+ * (C) ST-Ericsson SA 2013
+ */
+
+/**
+ * DOC: ST-Ericsson MCDE Driver
+ *
+ * The MCDE (short for multi-channel display engine) is a graphics
+ * controller found in the Ux500 chipsets, such as NovaThor U8500.
+ * It was initially conceptualized by ST Microelectronics for the
+ * successor of the Nomadik line, the STn8500, but was productized in
+ * the ST-Ericsson U8500, where it was used for mass-market deployments
+ * in Android phones from Samsung and Sony Ericsson.
+ *
+ * It can do 1080p30 on SDTV CCIR656, DPI-2, DBI-2 or DSI for
+ * panels with or without frame buffering and can convert most
+ * input formats including most variants of RGB and YUV.
+ *
+ * The hardware has four display pipes, and the layout is a little
+ * bit like this:
+ *
+ * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
+ * External 0..5 0..3 A,B, 3 x DSI bridge
+ * source 0..9 C0,C1 2 x DPI
+ *
+ * FIFOs A and B are for LCD and HDMI while FIFOs C0 and C1 are for
+ * panels with an embedded buffer.
+ * 3 of the formatters are for DSI.
+ * 2 of the formatters are for DPI.
+ *
+ * Behind the formatters are the DSI or DPI ports that route to
+ * the external pins of the chip. As there are 3 DSI ports and one
+ * DPI port, it is possible to configure up to 4 display pipelines
+ * (effectively using channels 0..3) for concurrent use.
+ *
+ * In the current DRM/KMS setup, we use one external source, one overlay,
+ * one FIFO and one formatter, which we connect to the simple CMA
+ * framebuffer helpers. We then provide a bridge to the DSI port, and on
+ * that DSI bridge we hang a panel bridge or other bridge. This may be
+ * subject to change as we exploit more of the hardware capabilities.
+ *
+ * TODO:
+ * - Enable damage clips using drm_plane_enable_fb_damage_clips()
+ * so we can selectively transmit only the damaged area to a
+ * command-only display.
+ * - Enable mixing of more planes, possibly at the cost of moving away
+ * from using the simple framebuffer pipeline.
+ * - Enable output to bridges such as the AV8100 HDMI encoder from
+ * the DSI bridge.
+ */
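+
+/*
+ * A rough sketch of how the hardware blocks above map to the DRM
+ * objects used by this driver (illustrative, not exhaustive):
+ *
+ *   overlay + channel + FIFO   ->  struct drm_simple_display_pipe
+ *   DSI formatter + DSI port   ->  struct drm_bridge (in mcde_dsi.c)
+ *   panel                      ->  drm_panel wrapped in a panel bridge
+ */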
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/dma-buf.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_vblank.h>
+
+#include "mcde_drm.h"
+
+#define DRIVER_DESC "DRM module for MCDE"
+
+#define MCDE_CR 0x00000000
+#define MCDE_CR_IFIFOEMPTYLINECOUNT_V422_SHIFT 0
+#define MCDE_CR_IFIFOEMPTYLINECOUNT_V422_MASK 0x0000003F
+#define MCDE_CR_IFIFOCTRLEN BIT(15)
+#define MCDE_CR_UFRECOVERY_MODE_V422 BIT(16)
+#define MCDE_CR_WRAP_MODE_V422 BIT(17)
+#define MCDE_CR_AUTOCLKG_EN BIT(30)
+#define MCDE_CR_MCDEEN BIT(31)
+
+#define MCDE_CONF0 0x00000004
+#define MCDE_CONF0_SYNCMUX0 BIT(0)
+#define MCDE_CONF0_SYNCMUX1 BIT(1)
+#define MCDE_CONF0_SYNCMUX2 BIT(2)
+#define MCDE_CONF0_SYNCMUX3 BIT(3)
+#define MCDE_CONF0_SYNCMUX4 BIT(4)
+#define MCDE_CONF0_SYNCMUX5 BIT(5)
+#define MCDE_CONF0_SYNCMUX6 BIT(6)
+#define MCDE_CONF0_SYNCMUX7 BIT(7)
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT 12
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_MASK 0x00007000
+#define MCDE_CONF0_OUTMUX0_SHIFT 16
+#define MCDE_CONF0_OUTMUX0_MASK 0x00070000
+#define MCDE_CONF0_OUTMUX1_SHIFT 19
+#define MCDE_CONF0_OUTMUX1_MASK 0x00380000
+#define MCDE_CONF0_OUTMUX2_SHIFT 22
+#define MCDE_CONF0_OUTMUX2_MASK 0x01C00000
+#define MCDE_CONF0_OUTMUX3_SHIFT 25
+#define MCDE_CONF0_OUTMUX3_MASK 0x0E000000
+#define MCDE_CONF0_OUTMUX4_SHIFT 28
+#define MCDE_CONF0_OUTMUX4_MASK 0x70000000
+
+#define MCDE_SSP 0x00000008
+#define MCDE_AIS 0x00000100
+#define MCDE_IMSCERR 0x00000110
+#define MCDE_RISERR 0x00000120
+#define MCDE_MISERR 0x00000130
+#define MCDE_SISERR 0x00000140
+
+#define MCDE_PID 0x000001FC
+#define MCDE_PID_METALFIX_VERSION_SHIFT 0
+#define MCDE_PID_METALFIX_VERSION_MASK 0x000000FF
+#define MCDE_PID_DEVELOPMENT_VERSION_SHIFT 8
+#define MCDE_PID_DEVELOPMENT_VERSION_MASK 0x0000FF00
+#define MCDE_PID_MINOR_VERSION_SHIFT 16
+#define MCDE_PID_MINOR_VERSION_MASK 0x00FF0000
+#define MCDE_PID_MAJOR_VERSION_SHIFT 24
+#define MCDE_PID_MAJOR_VERSION_MASK 0xFF000000
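+/*
+ * Example: PID 0x03000800, the only revision accepted in probe below,
+ * decodes as major version 3, minor version 0, development version 8
+ * and metal fix 0 using these fields.
+ */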
+
+static const struct drm_mode_config_funcs mcde_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs mcde_mode_config_helpers = {
+ /*
+ * Using this function is necessary to commit atomic updates
+ * that need the CRTC to be enabled before a commit, as is
+ * the case with e.g. DSI displays.
+ */
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static irqreturn_t mcde_irq(int irq, void *data)
+{
+ struct mcde *mcde = data;
+ u32 val;
+
+ val = readl(mcde->regs + MCDE_MISERR);
+
+ mcde_display_irq(mcde);
+
+ if (val)
+ dev_info(mcde->dev, "error IRQ, status %08x\n", val);
+ writel(val, mcde->regs + MCDE_RISERR);
+
+ return IRQ_HANDLED;
+}
+
+static int mcde_modeset_init(struct drm_device *drm)
+{
+ struct drm_mode_config *mode_config;
+ struct mcde *mcde = drm->dev_private;
+ int ret;
+
+ if (!mcde->bridge) {
+ dev_err(drm->dev, "no display output bridge yet\n");
+ return -EPROBE_DEFER;
+ }
+
+ mode_config = &drm->mode_config;
+ mode_config->funcs = &mcde_mode_config_funcs;
+ mode_config->helper_private = &mcde_mode_config_helpers;
+ /* This hardware can do 1080p */
+ mode_config->min_width = 1;
+ mode_config->max_width = 1920;
+ mode_config->min_height = 1;
+ mode_config->max_height = 1080;
+
+ /*
+ * Currently we only support vblank handling on the DSI bridge, using
+ * TE synchronization. If TE sync is not set up, it is still possible
+ * to push out a single update on demand, but this is hard for DRM to
+ * exploit.
+ */
+ if (mcde->te_sync) {
+ ret = drm_vblank_init(drm, 1);
+ if (ret) {
+ dev_err(drm->dev, "failed to init vblank\n");
+ goto out_config;
+ }
+ }
+
+ ret = mcde_display_init(drm);
+ if (ret) {
+ dev_err(drm->dev, "failed to init display\n");
+ goto out_config;
+ }
+
+ /*
+ * Attach the DSI bridge
+ *
+ * TODO: when adding support for the DPI bridge or several DSI bridges,
+ * we selectively connect the bridge(s) here instead of this simple
+ * attachment.
+ */
+ ret = drm_simple_display_pipe_attach_bridge(&mcde->pipe,
+ mcde->bridge);
+ if (ret) {
+ dev_err(drm->dev, "failed to attach display output bridge\n");
+ goto out_config;
+ }
+
+ drm_mode_config_reset(drm);
+ drm_kms_helper_poll_init(drm);
+ drm_fbdev_generic_setup(drm, 32);
+
+ return 0;
+
+out_config:
+ drm_mode_config_cleanup(drm);
+ return ret;
+}
+
+static void mcde_release(struct drm_device *drm)
+{
+ struct mcde *mcde = drm->dev_private;
+
+ drm_mode_config_cleanup(drm);
+ drm_dev_fini(drm);
+ kfree(mcde);
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+
+static struct drm_driver mcde_drm_driver = {
+ .driver_features =
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ .release = mcde_release,
+ .lastclose = drm_fb_helper_lastclose,
+ .ioctls = NULL,
+ .fops = &drm_fops,
+ .name = "mcde",
+ .desc = DRIVER_DESC,
+ .date = "20180529",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+};
+
+static int mcde_drm_bind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ int ret;
+
+ drm_mode_config_init(drm);
+
+ ret = component_bind_all(drm->dev, drm);
+ if (ret) {
+ dev_err(dev, "can't bind component devices\n");
+ return ret;
+ }
+
+ ret = mcde_modeset_init(drm);
+ if (ret)
+ goto unbind;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto unbind;
+
+ return 0;
+
+unbind:
+ component_unbind_all(drm->dev, drm);
+ return ret;
+}
+
+static void mcde_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+ component_unbind_all(drm->dev, drm);
+}
+
+static const struct component_master_ops mcde_drm_comp_ops = {
+ .bind = mcde_drm_bind,
+ .unbind = mcde_drm_unbind,
+};
+
+static struct platform_driver *const mcde_component_drivers[] = {
+ &mcde_dsi_driver,
+};
+
+static int mcde_compare_dev(struct device *dev, void *data)
+{
+ return dev == data;
+}
+
+static int mcde_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct drm_device *drm;
+ struct mcde *mcde;
+ struct component_match *match = NULL;
+ struct resource *res;
+ u32 pid;
+ u32 val;
+ int irq;
+ int ret;
+ int i;
+
+ mcde = kzalloc(sizeof(*mcde), GFP_KERNEL);
+ if (!mcde)
+ return -ENOMEM;
+ mcde->dev = dev;
+
+ ret = drm_dev_init(&mcde->drm, &mcde_drm_driver, dev);
+ if (ret) {
+ kfree(mcde);
+ return ret;
+ }
+ drm = &mcde->drm;
+ drm->dev_private = mcde;
+ platform_set_drvdata(pdev, drm);
+
+ /* Enable use of the TE signal and interrupt */
+ mcde->te_sync = true;
+ /* Enable continuous updates: this is what the Linux framebuffer expects */
+ mcde->oneshot_mode = false;
+
+ /* First obtain and turn on the main power */
+ mcde->epod = devm_regulator_get(dev, "epod");
+ if (IS_ERR(mcde->epod)) {
+ ret = PTR_ERR(mcde->epod);
+ dev_err(dev, "can't get EPOD regulator\n");
+ goto dev_unref;
+ }
+ ret = regulator_enable(mcde->epod);
+ if (ret) {
+ dev_err(dev, "can't enable EPOD regulator\n");
+ goto dev_unref;
+ }
+ mcde->vana = devm_regulator_get(dev, "vana");
+ if (IS_ERR(mcde->vana)) {
+ ret = PTR_ERR(mcde->vana);
+ dev_err(dev, "can't get VANA regulator\n");
+ goto regulator_epod_off;
+ }
+ ret = regulator_enable(mcde->vana);
+ if (ret) {
+ dev_err(dev, "can't enable VANA regulator\n");
+ goto regulator_epod_off;
+ }
+ /*
+ * The vendor code uses ESRAM (on-chip RAM) and needs to activate
+ * the v-esram34 regulator, but we don't use that yet.
+ */
+
+ /* Clock the silicon so we can access the registers */
+ mcde->mcde_clk = devm_clk_get(dev, "mcde");
+ if (IS_ERR(mcde->mcde_clk)) {
+ dev_err(dev, "unable to get MCDE main clock\n");
+ ret = PTR_ERR(mcde->mcde_clk);
+ goto regulator_off;
+ }
+ ret = clk_prepare_enable(mcde->mcde_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable MCDE main clock\n");
+ goto regulator_off;
+ }
+ dev_info(dev, "MCDE clk rate %lu Hz\n", clk_get_rate(mcde->mcde_clk));
+
+ mcde->lcd_clk = devm_clk_get(dev, "lcd");
+ if (IS_ERR(mcde->lcd_clk)) {
+ dev_err(dev, "unable to get LCD clock\n");
+ ret = PTR_ERR(mcde->lcd_clk);
+ goto clk_disable;
+ }
+ mcde->hdmi_clk = devm_clk_get(dev, "hdmi");
+ if (IS_ERR(mcde->hdmi_clk)) {
+ dev_err(dev, "unable to get HDMI clock\n");
+ ret = PTR_ERR(mcde->hdmi_clk);
+ goto clk_disable;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mcde->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mcde->regs)) {
+ dev_err(dev, "no MCDE regs\n");
+ ret = PTR_ERR(mcde->regs);
+ goto clk_disable;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto clk_disable;
+ }
+
+ ret = devm_request_irq(dev, irq, mcde_irq, 0, "mcde", mcde);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d: %d\n", irq, ret);
+ goto clk_disable;
+ }
+
+ /*
+ * Check the hardware revision: we only support the U8500v2 version
+ * as this was the only version used for mass-market deployment,
+ * but more versions can be added if you have the hardware and
+ * need them.
+ */
+ pid = readl(mcde->regs + MCDE_PID);
+ dev_info(dev, "found MCDE HW revision %d.%d (dev %d, metal fix %d)\n",
+ (pid & MCDE_PID_MAJOR_VERSION_MASK)
+ >> MCDE_PID_MAJOR_VERSION_SHIFT,
+ (pid & MCDE_PID_MINOR_VERSION_MASK)
+ >> MCDE_PID_MINOR_VERSION_SHIFT,
+ (pid & MCDE_PID_DEVELOPMENT_VERSION_MASK)
+ >> MCDE_PID_DEVELOPMENT_VERSION_SHIFT,
+ (pid & MCDE_PID_METALFIX_VERSION_MASK)
+ >> MCDE_PID_METALFIX_VERSION_SHIFT);
+ if (pid != 0x03000800) {
+ dev_err(dev, "unsupported hardware revision\n");
+ ret = -ENODEV;
+ goto clk_disable;
+ }
+
+ /* Set up the main control, watermark level at 7 */
+ val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
+ /* 24 bits DPI: connect LSB Ch B to D[0:7] */
+ val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT;
+ /* TV out: connect LSB Ch B to D[8:15] */
+ val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT;
+ /* Don't care about this muxing */
+ val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
+ /* 24 bits DPI: connect MID Ch B to D[24:31] */
+ val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT;
+ /* 24 bits DPI: connect MSB Ch B to D[32:39] */
+ val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT;
+ /* Syncmux bits zero: DPI channel A and B on output pins A and B resp */
+ writel(val, mcde->regs + MCDE_CONF0);
+
+ /* Enable automatic clock gating */
+ val = readl(mcde->regs + MCDE_CR);
+ val |= MCDE_CR_MCDEEN | MCDE_CR_AUTOCLKG_EN;
+ writel(val, mcde->regs + MCDE_CR);
+
+ /* Clear any pending interrupts */
+ mcde_display_disable_irqs(mcde);
+ writel(0, mcde->regs + MCDE_IMSCERR);
+ writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
+
+ /* Spawn child devices for the DSI ports */
+ devm_of_platform_populate(dev);
+
+ /* Create something that will match the subdrivers when we bind */
+ for (i = 0; i < ARRAY_SIZE(mcde_component_drivers); i++) {
+ struct device_driver *drv = &mcde_component_drivers[i]->driver;
+ struct device *p = NULL, *d;
+
+ while ((d = bus_find_device(&platform_bus_type, p, drv,
+ (void *)platform_bus_type.match))) {
+ put_device(p);
+ component_match_add(dev, &match, mcde_compare_dev, d);
+ p = d;
+ }
+ put_device(p);
+ }
+ if (IS_ERR(match)) {
+ dev_err(dev, "could not create component match\n");
+ ret = PTR_ERR(match);
+ goto clk_disable;
+ }
+ ret = component_master_add_with_match(&pdev->dev, &mcde_drm_comp_ops,
+ match);
+ if (ret) {
+ dev_err(dev, "failed to add component master\n");
+ goto clk_disable;
+ }
+ return 0;
+
+clk_disable:
+ clk_disable_unprepare(mcde->mcde_clk);
+regulator_off:
+ regulator_disable(mcde->vana);
+regulator_epod_off:
+ regulator_disable(mcde->epod);
+dev_unref:
+ drm_dev_put(drm);
+ return ret;
+}
+
+static int mcde_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+ struct mcde *mcde = drm->dev_private;
+
+ component_master_del(&pdev->dev, &mcde_drm_comp_ops);
+ clk_disable_unprepare(mcde->mcde_clk);
+ regulator_disable(mcde->vana);
+ regulator_disable(mcde->epod);
+ drm_dev_put(drm);
+
+ return 0;
+}
+
+static const struct of_device_id mcde_of_match[] = {
+ {
+ .compatible = "ste,mcde",
+ },
+ {},
+};
+
+static struct platform_driver mcde_driver = {
+ .driver = {
+ .name = "mcde",
+ .of_match_table = of_match_ptr(mcde_of_match),
+ },
+ .probe = mcde_probe,
+ .remove = mcde_remove,
+};
+
+static int __init mcde_drm_register(void)
+{
+ int ret;
+
+ ret = platform_register_drivers(mcde_component_drivers,
+ ARRAY_SIZE(mcde_component_drivers));
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&mcde_driver);
+}
+
+static void __exit mcde_drm_unregister(void)
+{
+ platform_unregister_drivers(mcde_component_drivers,
+ ARRAY_SIZE(mcde_component_drivers));
+ platform_driver_unregister(&mcde_driver);
+}
+
+module_init(mcde_drm_register);
+module_exit(mcde_drm_unregister);
+
+MODULE_ALIAS("platform:mcde-drm");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
new file mode 100644
index 000000000000..07f7090d08b3
--- /dev/null
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -0,0 +1,1044 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mcde_drm.h"
+#include "mcde_dsi_regs.h"
+
+#define DSI_DEFAULT_LP_FREQ_HZ 19200000
+#define DSI_DEFAULT_HS_FREQ_HZ 420160000
+
+/* PRCMU DSI reset registers */
+#define PRCM_DSI_SW_RESET 0x324
+#define PRCM_DSI_SW_RESET_DSI0_SW_RESETN BIT(0)
+#define PRCM_DSI_SW_RESET_DSI1_SW_RESETN BIT(1)
+#define PRCM_DSI_SW_RESET_DSI2_SW_RESETN BIT(2)
+
+struct mcde_dsi {
+ struct device *dev;
+ struct mcde *mcde;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge_out;
+ struct mipi_dsi_host dsi_host;
+ struct mipi_dsi_device *mdsi;
+ struct clk *hs_clk;
+ struct clk *lp_clk;
+ unsigned long hs_freq;
+ unsigned long lp_freq;
+ bool unused;
+
+ void __iomem *regs;
+ struct regmap *prcmu;
+};
+
+static inline struct mcde_dsi *bridge_to_mcde_dsi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct mcde_dsi, bridge);
+}
+
+static inline struct mcde_dsi *host_to_mcde_dsi(struct mipi_dsi_host *h)
+{
+ return container_of(h, struct mcde_dsi, dsi_host);
+}
+
+static inline struct mcde_dsi *connector_to_mcde_dsi(struct drm_connector *c)
+{
+ return container_of(c, struct mcde_dsi, connector);
+}
+
+bool mcde_dsi_irq(struct mipi_dsi_device *mdsi)
+{
+ struct mcde_dsi *d;
+ u32 val;
+ bool te_received = false;
+
+ d = host_to_mcde_dsi(mdsi->host);
+
+ dev_dbg(d->dev, "%s called\n", __func__);
+
+ val = readl(d->regs + DSI_DIRECT_CMD_STS_FLAG);
+ if (val)
+ dev_dbg(d->dev, "DSI_DIRECT_CMD_STS_FLAG = %08x\n", val);
+ if (val & DSI_DIRECT_CMD_STS_WRITE_COMPLETED)
+ dev_dbg(d->dev, "direct command write completed\n");
+ if (val & DSI_DIRECT_CMD_STS_TE_RECEIVED) {
+ te_received = true;
+ dev_dbg(d->dev, "direct command TE received\n");
+ }
+ if (val & DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED)
+ dev_err(d->dev, "direct command ACK ERR received\n");
+ if (val & DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR)
+ dev_err(d->dev, "direct command read ERR received\n");
+ /* Mask off the ACK value and clear status */
+ writel(val, d->regs + DSI_DIRECT_CMD_STS_CLR);
+
+ val = readl(d->regs + DSI_CMD_MODE_STS_FLAG);
+ if (val)
+ dev_dbg(d->dev, "DSI_CMD_MODE_STS_FLAG = %08x\n", val);
+ if (val & DSI_CMD_MODE_STS_ERR_NO_TE)
+ /* This happens all the time (safe to ignore) */
+ dev_dbg(d->dev, "CMD mode no TE\n");
+ if (val & DSI_CMD_MODE_STS_ERR_TE_MISS)
+ /* This happens all the time (safe to ignore) */
+ dev_dbg(d->dev, "CMD mode TE miss\n");
+ if (val & DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN)
+ dev_err(d->dev, "CMD mode SD1 underrun\n");
+ if (val & DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN)
+ dev_err(d->dev, "CMD mode SD2 underrun\n");
+ if (val & DSI_CMD_MODE_STS_ERR_UNWANTED_RD)
+ dev_err(d->dev, "CMD mode unwanted RD\n");
+ writel(val, d->regs + DSI_CMD_MODE_STS_CLR);
+
+ val = readl(d->regs + DSI_DIRECT_CMD_RD_STS_FLAG);
+ if (val)
+ dev_dbg(d->dev, "DSI_DIRECT_CMD_RD_STS_FLAG = %08x\n", val);
+ writel(val, d->regs + DSI_DIRECT_CMD_RD_STS_CLR);
+
+ val = readl(d->regs + DSI_TG_STS_FLAG);
+ if (val)
+ dev_dbg(d->dev, "DSI_TG_STS_FLAG = %08x\n", val);
+ writel(val, d->regs + DSI_TG_STS_CLR);
+
+ val = readl(d->regs + DSI_VID_MODE_STS_FLAG);
+ if (val)
+ dev_err(d->dev, "some video mode error status\n");
+ writel(val, d->regs + DSI_VID_MODE_STS_CLR);
+
+ return te_received;
+}
+
+static int mcde_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *mdsi)
+{
+ struct mcde_dsi *d = host_to_mcde_dsi(host);
+
+ if (mdsi->lanes < 1 || mdsi->lanes > 2) {
+ DRM_ERROR("dsi device params invalid, 1 or 2 lanes supported\n");
+ return -EINVAL;
+ }
+
+ dev_info(d->dev, "attached DSI device with %d lanes\n", mdsi->lanes);
+ /* MIPI_DSI_FMT_RGB888 etc */
+ dev_info(d->dev, "format %08x, %dbpp\n", mdsi->format,
+ mipi_dsi_pixel_format_to_bpp(mdsi->format));
+ dev_info(d->dev, "mode flags: %08lx\n", mdsi->mode_flags);
+
+ d->mdsi = mdsi;
+ if (d->mcde)
+ d->mcde->mdsi = mdsi;
+
+ return 0;
+}
+
+static int mcde_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *mdsi)
+{
+ struct mcde_dsi *d = host_to_mcde_dsi(host);
+
+ d->mdsi = NULL;
+ if (d->mcde)
+ d->mcde->mdsi = NULL;
+
+ return 0;
+}
+
+#define MCDE_DSI_HOST_IS_READ(type) \
+ ((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
+ (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
+ (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
+ (type == MIPI_DSI_DCS_READ))
+
+static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct mcde_dsi *d = host_to_mcde_dsi(host);
+ const u32 loop_delay_us = 10; /* us */
+ const u8 *tx = msg->tx_buf;
+ u32 loop_counter;
+ size_t txlen;
+ u32 val;
+ int ret;
+ int i;
+
+ txlen = msg->tx_len;
+ if (txlen > 16) {
+ dev_err(d->dev,
+ "dunno how to write more than 16 bytes yet\n");
+ return -EIO;
+ }
+
+ dev_dbg(d->dev,
+ "message to channel %d, %zd bytes",
+ msg->channel,
+ txlen);
+
+ /* Command "nature" */
+ if (MCDE_DSI_HOST_IS_READ(msg->type))
+ /* MCTL_MAIN_DATA_CTL already set up */
+ val = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_READ;
+ else
+ val = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_WRITE;
+ /*
+ * More than 2 bytes will not fit in a single packet, so it's
+ * time to set the "long not short" bit. One byte is used by
+ * the MIPI DCS command, leaving just one byte for the payload
+ * in a short packet.
+ */
+ if (mipi_dsi_packet_format_is_long(msg->type))
+ val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT;
+ val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
+ /* The length already includes the leading MIPI DCS command byte */
+ val |= txlen
+ << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
+ val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
+ val |= msg->type << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
+ writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
+
+ /* MIPI DCS command is part of the data */
+ if (txlen > 0) {
+ val = 0;
+ for (i = 0; i < 4 && i < txlen; i++)
+ val |= tx[i] << (i & 3) * 8;
+ }
+ writel(val, d->regs + DSI_DIRECT_CMD_WRDAT0);
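+ /*
+ * The payload is packed little-endian into the 32-bit data
+ * registers. For example, a hypothetical 3-byte DCS write
+ * {0x2a, 0x00, 0x1f} ends up as WRDAT0 = 0x001f002a.
+ */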
+ if (txlen > 4) {
+ val = 0;
+ for (i = 0; i < 4 && (i + 4) < txlen; i++)
+ val |= tx[i + 4] << (i & 3) * 8;
+ writel(val, d->regs + DSI_DIRECT_CMD_WRDAT1);
+ }
+ if (txlen > 8) {
+ val = 0;
+ for (i = 0; i < 4 && (i + 8) < txlen; i++)
+ val |= tx[i + 8] << (i & 3) * 8;
+ writel(val, d->regs + DSI_DIRECT_CMD_WRDAT2);
+ }
+ if (txlen > 12) {
+ val = 0;
+ for (i = 0; i < 4 && (i + 12) < txlen; i++)
+ val |= tx[i + 12] << (i & 3) * 8;
+ writel(val, d->regs + DSI_DIRECT_CMD_WRDAT3);
+ }
+
+ writel(~0, d->regs + DSI_DIRECT_CMD_STS_CLR);
+ writel(~0, d->regs + DSI_CMD_MODE_STS_CLR);
+ /* Send command */
+ writel(1, d->regs + DSI_DIRECT_CMD_SEND);
+
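+ /*
+ * Open-coded polling; an equivalent formulation using
+ * readl_poll_timeout() from <linux/iopoll.h> would be (sketch):
+ *
+ *   ret = readl_poll_timeout(d->regs + DSI_DIRECT_CMD_STS, val,
+ *           val & DSI_DIRECT_CMD_STS_WRITE_COMPLETED,
+ *           loop_delay_us, 1000 * 1000);
+ */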
+ loop_counter = 1000 * 1000 / loop_delay_us;
+ while (!(readl(d->regs + DSI_DIRECT_CMD_STS) &
+ DSI_DIRECT_CMD_STS_WRITE_COMPLETED)
+ && --loop_counter)
+ usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
+
+ if (!loop_counter) {
+ dev_err(d->dev, "DSI write timeout!\n");
+ return -ETIME;
+ }
+
+ val = readl(d->regs + DSI_DIRECT_CMD_STS);
+ if (val & DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED) {
+ val >>= DSI_DIRECT_CMD_STS_ACK_VAL_SHIFT;
+ dev_err(d->dev, "error during transmission: %04x\n",
+ val);
+ return -EIO;
+ }
+
+ if (!MCDE_DSI_HOST_IS_READ(msg->type)) {
+ /* Return number of bytes written */
+ if (mipi_dsi_packet_format_is_long(msg->type))
+ ret = 4 + txlen;
+ else
+ ret = 4;
+ } else {
+ /* OK this is a read command, get the response */
+ u32 rdsz;
+ u32 rddat;
+ u8 *rx = msg->rx_buf;
+
+ rdsz = readl(d->regs + DSI_DIRECT_CMD_RD_PROPERTY);
+ rdsz &= DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_MASK;
+ rddat = readl(d->regs + DSI_DIRECT_CMD_RDDAT);
+ for (i = 0; i < 4 && i < rdsz; i++)
+ rx[i] = (rddat >> (i * 8)) & 0xff;
+ ret = rdsz;
+ }
+
+ writel(~0, d->regs + DSI_DIRECT_CMD_STS_CLR);
+ writel(~0, d->regs + DSI_CMD_MODE_STS_CLR);
+
+ return ret;
+}
+
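+/*
+ * Panel drivers never call the transfer hook directly: the generic DSI
+ * helpers funnel into mcde_dsi_host_transfer() through the .transfer op
+ * below. A sketch of what an attached panel driver might do during
+ * prepare (illustrative, not part of this driver):
+ *
+ *   mipi_dsi_dcs_exit_sleep_mode(mdsi);
+ *   mipi_dsi_dcs_set_tear_on(mdsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ */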
+static const struct mipi_dsi_host_ops mcde_dsi_host_ops = {
+ .attach = mcde_dsi_host_attach,
+ .detach = mcde_dsi_host_detach,
+ .transfer = mcde_dsi_host_transfer,
+};
+
+/* This sends a direct (short) command to request TE */
+void mcde_dsi_te_request(struct mipi_dsi_device *mdsi)
+{
+ struct mcde_dsi *d;
+ u32 val;
+
+ d = host_to_mcde_dsi(mdsi->host);
+
+ /* Command "nature" TE request */
+ val = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TE_REQ;
+ val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
+ val |= 2 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
+ val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
+ val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 <<
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
+ writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
+
+ /* Clear TE received and error status bits, then enable them */
+ writel(DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR |
+ DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR,
+ d->regs + DSI_DIRECT_CMD_STS_CLR);
+ val = readl(d->regs + DSI_DIRECT_CMD_STS_CTL);
+ val |= DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN;
+ val |= DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN;
+ writel(val, d->regs + DSI_DIRECT_CMD_STS_CTL);
+
+ /* Clear and enable no TE or TE missing status */
+ writel(DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR |
+ DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR,
+ d->regs + DSI_CMD_MODE_STS_CLR);
+ val = readl(d->regs + DSI_CMD_MODE_STS_CTL);
+ val |= DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN;
+ val |= DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN;
+ writel(val, d->regs + DSI_CMD_MODE_STS_CTL);
+
+ /* Send this TE request command */
+ writel(1, d->regs + DSI_DIRECT_CMD_SEND);
+}
+
+static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
+ const struct drm_display_mode *mode)
+{
+ u8 bpp = mipi_dsi_pixel_format_to_bpp(d->mdsi->format);
+ u64 bpl;
+ u32 hfp;
+ u32 hbp;
+ u32 hsa;
+ u32 blkline_pck, line_duration;
+ u32 blkeol_pck, blkeol_duration;
+ u32 val;
+
+ val = 0;
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ val |= DSI_VID_MAIN_CTL_BURST_MODE;
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ val |= DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE;
+ val |= DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL;
+ }
+ /* RGB header and pixel mode */
+ switch (d->mdsi->format) {
+ case MIPI_DSI_FMT_RGB565:
+ val |= MIPI_DSI_PACKED_PIXEL_STREAM_16 <<
+ DSI_VID_MAIN_CTL_HEADER_SHIFT;
+ val |= DSI_VID_MAIN_CTL_VID_PIXEL_MODE_16BITS;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ val |= MIPI_DSI_PACKED_PIXEL_STREAM_18 <<
+ DSI_VID_MAIN_CTL_HEADER_SHIFT;
+ val |= DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ val |= MIPI_DSI_PIXEL_STREAM_3BYTE_18
+ << DSI_VID_MAIN_CTL_HEADER_SHIFT;
+ val |= DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS_LOOSE;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ val |= MIPI_DSI_PACKED_PIXEL_STREAM_24 <<
+ DSI_VID_MAIN_CTL_HEADER_SHIFT;
+ val |= DSI_VID_MAIN_CTL_VID_PIXEL_MODE_24BITS;
+ break;
+ default:
+ dev_err(d->dev, "unknown pixel mode\n");
+ return;
+ }
+
+ /* TODO: TVG could be enabled here */
+
+ /* Send blanking packet */
+ val |= DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_0;
+ /* Send EOL packet */
+ val |= DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0;
+ /* Recovery mode 1 */
+ val |= 1 << DSI_VID_MAIN_CTL_RECOVERY_MODE_SHIFT;
+ /* All other fields zero */
+ writel(val, d->regs + DSI_VID_MAIN_CTL);
+
+ /* Vertical frame parameters are straightforward */
+ /* vertical sync active */
+ val = (mode->vsync_end - mode->vsync_start)
+ << DSI_VID_VSIZE_VSA_LENGTH_SHIFT;
+ /* vertical front porch */
+ val |= (mode->vsync_start - mode->vdisplay)
+ << DSI_VID_VSIZE_VFP_LENGTH_SHIFT;
+ /* vertical active lines (the resolution) */
+ val |= mode->vdisplay << DSI_VID_VSIZE_VACT_LENGTH_SHIFT;
+ /* vertical back porch */
+ val |= (mode->vtotal - mode->vsync_end)
+ << DSI_VID_VSIZE_VBP_LENGTH_SHIFT;
+ writel(val, d->regs + DSI_VID_VSIZE);
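+ /*
+ * Worked example with hypothetical timings: vdisplay = 800,
+ * vsync_start = 815, vsync_end = 816, vtotal = 822 gives
+ * VSA = 1, VFP = 15, VBP = 6 and VACT = 800 lines.
+ */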
+
+ /*
+ * Horizontal frame parameters:
+ * horizontal resolution is given in pixels and must be re-calculated
+ * into bytes since this is what the hardware expects.
+ *
+ * 6 + 2 is HFP header + checksum
+ */
+ hfp = (mode->hsync_start - mode->hdisplay) * bpp - 6 - 2;
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ /*
+ * 6 is HBP header + checksum
+ * 4 is RGB header + checksum
+ */
+ hbp = (mode->htotal - mode->hsync_end) * bpp - 4 - 6;
+ /*
+ * 6 is HBP header + checksum
+ * 4 is HSW packet bytes
+ * 4 is RGB header + checksum
+ */
+ hsa = (mode->hsync_end - mode->hsync_start) * bpp - 4 - 4 - 6;
+ } else {
+ /*
+ * HBP includes both back porch and sync
+ * 6 is HBP header + checksum
+ * 4 is HSW packet bytes
+ * 4 is RGB header + checksum
+ */
+ hbp = (mode->htotal - mode->hsync_start) * bpp - 4 - 4 - 6;
+ /* HSA is not considered in this mode and set to 0 */
+ hsa = 0;
+ }
+ dev_dbg(d->dev, "hfp: %u, hbp: %u, hsa: %u\n",
+ hfp, hbp, hsa);
+
+ /* Frame parameters: horizontal sync active */
+ val = hsa << DSI_VID_HSIZE1_HSA_LENGTH_SHIFT;
+ /* horizontal back porch */
+ val |= hbp << DSI_VID_HSIZE1_HBP_LENGTH_SHIFT;
+ /* horizontal front porch */
+ val |= hfp << DSI_VID_HSIZE1_HFP_LENGTH_SHIFT;
+ writel(val, d->regs + DSI_VID_HSIZE1);
+
+ /* RGB data length (bytes on one scanline) */
+ val = mode->hdisplay * (bpp / 8);
+ writel(val, d->regs + DSI_VID_HSIZE2);
+
+ /* TODO: further adjustments for TVG mode here */
+
+ /*
+ * EOL packet length from bits per line calculations: pixel clock
+ * is given in kHz, calculate the time between two pixels in
+ * picoseconds.
+ */
+ bpl = mode->clock * mode->htotal;
+ bpl *= (d->hs_freq / 8);
+ do_div(bpl, 1000000); /* microseconds */
+ do_div(bpl, 1000000); /* seconds */
+ bpl *= d->mdsi->lanes;
+ dev_dbg(d->dev, "calculated bytes per line: %llu\n", bpl);
+ /*
+ * 6 is header + checksum, header = 4 bytes, checksum = 2 bytes
+ * 4 is short packet for vsync/hsync
+ */
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ /* Fixme: isn't the hsync width in pixels? */
+ blkline_pck = bpl - (mode->hsync_end - mode->hsync_start) - 6;
+ val = blkline_pck << DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_SHIFT;
+ writel(val, d->regs + DSI_VID_BLKSIZE2);
+ } else {
+ blkline_pck = bpl - 4 - 6;
+ val = blkline_pck << DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_SHIFT;
+ writel(val, d->regs + DSI_VID_BLKSIZE1);
+ }
+
+ line_duration = (blkline_pck + 6) / d->mdsi->lanes;
+ dev_dbg(d->dev, "line duration %u\n", line_duration);
+ val = line_duration << DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT;
+ /*
+ * This is the time to perform LP->HS on D-PHY
+ * FIXME: nowhere to get this from: DT property on the DSI?
+ */
+ val |= 0 << DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT;
+ writel(val, d->regs + DSI_VID_DPHY_TIME);
+
+ /* Calculate block end of line */
+ blkeol_pck = bpl - mode->hdisplay * bpp - 6;
+ blkeol_duration = (blkeol_pck + 6) / d->mdsi->lanes;
+ dev_dbg(d->dev, "blkeol pck: %u, duration: %u\n",
+ blkeol_pck, blkeol_duration);
+
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ /* Set up EOL clock for burst mode */
+ val = readl(d->regs + DSI_VID_BLKSIZE1);
+ val |= blkeol_pck << DSI_VID_BLKSIZE1_BLKEOL_PCK_SHIFT;
+ writel(val, d->regs + DSI_VID_BLKSIZE1);
+ writel(blkeol_pck, d->regs + DSI_VID_VCA_SETTING2);
+
+ writel(blkeol_duration, d->regs + DSI_VID_PCK_TIME);
+ writel(blkeol_duration - 6, d->regs + DSI_VID_VCA_SETTING1);
+ }
+
+ /* Maximum line limit */
+ val = readl(d->regs + DSI_VID_VCA_SETTING2);
+ val |= blkline_pck <<
+ DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT;
+ writel(val, d->regs + DSI_VID_VCA_SETTING2);
+
+ /* Put IF1 into video mode */
+ val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ val |= DSI_MCTL_MAIN_DATA_CTL_IF1_MODE;
+ writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+
+ /* Disable command mode on IF1 */
+ val = readl(d->regs + DSI_CMD_MODE_CTL);
+ val &= ~DSI_CMD_MODE_CTL_IF1_LP_EN;
+ writel(val, d->regs + DSI_CMD_MODE_CTL);
+
+ /* Enable some error interrupts */
+ val = readl(d->regs + DSI_VID_MODE_STS_CTL);
+ val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC;
+ val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA;
+ writel(val, d->regs + DSI_VID_MODE_STS_CTL);
+
+ /* Enable video mode */
+ val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ val |= DSI_MCTL_MAIN_DATA_CTL_VID_EN;
+ writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+}
+
+static void mcde_dsi_start(struct mcde_dsi *d)
+{
+ unsigned long hs_freq;
+ u32 val;
+ int i;
+
+ /* No integration mode */
+ writel(0, d->regs + DSI_MCTL_INTEGRATION_MODE);
+
+ /* Enable the DSI port, from drivers/video/mcde/dsilink_v2.c */
+ val = DSI_MCTL_MAIN_DATA_CTL_LINK_EN |
+ DSI_MCTL_MAIN_DATA_CTL_BTA_EN |
+ DSI_MCTL_MAIN_DATA_CTL_READ_EN |
+ DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN;
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
+ val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN;
+ writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+
+ /* Set a high command timeout, clear other fields */
+ val = 0x3ff << DSI_CMD_MODE_CTL_TE_TIMEOUT_SHIFT;
+ writel(val, d->regs + DSI_CMD_MODE_CTL);
+
+ /*
+ * UI_X4 is four DSI "unit intervals" expressed in nanoseconds:
+ * one unit interval is one HS bit period, so with the HS clock
+ * in MHz, 4 * UI = 4000 / hs_freq nanoseconds.
+ */
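+ /*
+ * Worked example, assuming the default 420.16 MHz HS clock:
+ * 4000 / 420 = 9, i.e. four unit intervals are ~9.5 ns and the
+ * truncated integer 9 is what gets programmed.
+ */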
+ hs_freq = clk_get_rate(d->hs_clk);
+ hs_freq /= 1000000; /* MHz */
+ val = 4000 / hs_freq;
+ dev_dbg(d->dev, "UI value: %d\n", val);
+ val <<= DSI_MCTL_DPHY_STATIC_UI_X4_SHIFT;
+ val &= DSI_MCTL_DPHY_STATIC_UI_X4_MASK;
+ writel(val, d->regs + DSI_MCTL_DPHY_STATIC);
+
+ /*
+ * Enable clocking: wait 0x0f between each burst (the unit is
+ * unclear from the vendor code), enable the second lane if
+ * needed, enable continuous clock if needed, and enable switch
+ * into ULPM (ultra-low power mode) on all the lanes.
+ */
+ val = 0x0f << DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_SHIFT;
+ if (d->mdsi->lanes == 2)
+ val |= DSI_MCTL_MAIN_PHY_CTL_LANE2_EN;
+ if (!(d->mdsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ val |= DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS;
+ val |= DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN |
+ DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN |
+ DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN;
+ writel(val, d->regs + DSI_MCTL_MAIN_PHY_CTL);
+
+ val = (1 << DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_SHIFT) |
+ (1 << DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_SHIFT);
+ writel(val, d->regs + DSI_MCTL_ULPOUT_TIME);
+
+ writel(DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_90,
+ d->regs + DSI_DPHY_LANES_TRIM);
+
+ /* High PHY timeout */
+ val = (0x0f << DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_SHIFT) |
+ (0x3fff << DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_SHIFT) |
+ (0x3fff << DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_SHIFT);
+ writel(val, d->regs + DSI_MCTL_DPHY_TIMEOUT);
+
+ val = DSI_MCTL_MAIN_EN_PLL_START |
+ DSI_MCTL_MAIN_EN_CKLANE_EN |
+ DSI_MCTL_MAIN_EN_DAT1_EN |
+ DSI_MCTL_MAIN_EN_IF1_EN;
+ if (d->mdsi->lanes == 2)
+ val |= DSI_MCTL_MAIN_EN_DAT2_EN;
+ writel(val, d->regs + DSI_MCTL_MAIN_EN);
+
+ /* Wait for the PLL to lock and the clock and data lines to come up */
+ i = 0;
+ val = DSI_MCTL_MAIN_STS_PLL_LOCK |
+ DSI_MCTL_MAIN_STS_CLKLANE_READY |
+ DSI_MCTL_MAIN_STS_DAT1_READY;
+ if (d->mdsi->lanes == 2)
+ val |= DSI_MCTL_MAIN_STS_DAT2_READY;
+ while ((readl(d->regs + DSI_MCTL_MAIN_STS) & val) != val) {
+ /* Sleep for a millisecond */
+ usleep_range(1000, 1500);
+ if (i++ == 100) {
+ dev_warn(d->dev, "DSI lanes did not start up\n");
+ return;
+ }
+ }
+
+ /* TODO needed? */
+
+ /* Command mode, clear IF1 ID */
+ val = readl(d->regs + DSI_CMD_MODE_CTL);
+ /*
+ * If we enable low-power mode here, with
+ * val |= DSI_CMD_MODE_CTL_IF1_LP_EN
+ * then display updates become really slow.
+ */
+ val &= ~DSI_CMD_MODE_CTL_IF1_ID_MASK;
+ writel(val, d->regs + DSI_CMD_MODE_CTL);
+
+ /* Wait for DSI PHY to initialize */
+ usleep_range(100, 200);
+ dev_info(d->dev, "DSI link enabled\n");
+}
+
+static void mcde_dsi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
+
+ dev_info(d->dev, "enable DSI master\n");
+}
+
+static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adj)
+{
+ struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
+ unsigned long pixel_clock_hz = mode->clock * 1000;
+ unsigned long hs_freq, lp_freq;
+ u32 val;
+ int ret;
+
+ if (!d->mdsi) {
+ dev_err(d->dev, "no DSI device attached to encoder!\n");
+ return;
+ }
+
+ dev_info(d->dev, "set DSI master to %dx%d %lu Hz %s mode\n",
+ mode->hdisplay, mode->vdisplay, pixel_clock_hz,
+ (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? "VIDEO" : "CMD"
+ );
+
+ /* Copy maximum clock frequencies */
+ if (d->mdsi->lp_rate)
+ lp_freq = d->mdsi->lp_rate;
+ else
+ lp_freq = DSI_DEFAULT_LP_FREQ_HZ;
+ if (d->mdsi->hs_rate)
+ hs_freq = d->mdsi->hs_rate;
+ else
+ hs_freq = DSI_DEFAULT_HS_FREQ_HZ;
+
+ /* Enable LP (Low Power, Energy Save, ES) and HS (High Speed) clocks */
+ d->lp_freq = clk_round_rate(d->lp_clk, lp_freq);
+ ret = clk_set_rate(d->lp_clk, d->lp_freq);
+ if (ret)
+ dev_err(d->dev, "failed to set LP clock rate %lu Hz\n",
+ d->lp_freq);
+
+ d->hs_freq = clk_round_rate(d->hs_clk, hs_freq);
+ ret = clk_set_rate(d->hs_clk, d->hs_freq);
+ if (ret)
+ dev_err(d->dev, "failed to set HS clock rate %lu Hz\n",
+ d->hs_freq);
+
+ /* Start clocks */
+ ret = clk_prepare_enable(d->lp_clk);
+ if (ret)
+ dev_err(d->dev, "failed to enable LP clock\n");
+ else
+ dev_info(d->dev, "DSI LP clock rate %lu Hz\n",
+ d->lp_freq);
+ ret = clk_prepare_enable(d->hs_clk);
+ if (ret)
+ dev_err(d->dev, "failed to enable HS clock\n");
+ else
+ dev_info(d->dev, "DSI HS clock rate %lu Hz\n",
+ d->hs_freq);
+
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ mcde_dsi_setup_video_mode(d, mode);
+ } else {
+ /* Command mode, clear IF1 ID */
+ val = readl(d->regs + DSI_CMD_MODE_CTL);
+ /*
+ * If we enable low-power mode here with
+ * val |= DSI_CMD_MODE_CTL_IF1_LP_EN
+ * the display updates become really slow.
+ */
+ val &= ~DSI_CMD_MODE_CTL_IF1_ID_MASK;
+ writel(val, d->regs + DSI_CMD_MODE_CTL);
+ }
+}
+
+static void mcde_dsi_wait_for_command_mode_stop(struct mcde_dsi *d)
+{
+ u32 val;
+ int i;
+
+ /*
+ * Wait until we get out of command mode
+ * CSM = Command State Machine
+ */
+ i = 0;
+ val = DSI_CMD_MODE_STS_CSM_RUNNING;
+ while ((readl(d->regs + DSI_CMD_MODE_STS) & val) == val) {
+ /* Sleep for a millisecond */
+ usleep_range(1000, 2000);
+ if (i++ == 100) {
+ dev_warn(d->dev,
+ "could not get out of command mode\n");
+ return;
+ }
+ }
+}
+
+static void mcde_dsi_wait_for_video_mode_stop(struct mcde_dsi *d)
+{
+ u32 val;
+ int i;
+
+ /* Wait until we get out of video mode */
+ i = 0;
+ val = DSI_VID_MODE_STS_VSG_RUNNING;
+ while ((readl(d->regs + DSI_VID_MODE_STS) & val) == val) {
+ /* Sleep for a millisecond */
+ usleep_range(1000, 2000);
+ if (i++ == 100) {
+ dev_warn(d->dev,
+ "could not get out of video mode\n");
+ return;
+ }
+ }
+}
+
+static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
+ u32 val;
+
+ /* Disable all error interrupts */
+ writel(0, d->regs + DSI_VID_MODE_STS_CTL);
+
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ /* Stop video mode */
+ val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ val &= ~DSI_MCTL_MAIN_DATA_CTL_VID_EN;
+ writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ mcde_dsi_wait_for_video_mode_stop(d);
+ } else {
+ /* Stop command mode */
+ mcde_dsi_wait_for_command_mode_stop(d);
+ }
+
+ /* Stop clocks */
+ clk_disable_unprepare(d->hs_clk);
+ clk_disable_unprepare(d->lp_clk);
+}
+
+/*
+ * This connector needs no special handling: the default helpers
+ * suffice for everything.
+ */
+static const struct drm_connector_funcs mcde_dsi_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int mcde_dsi_get_modes(struct drm_connector *connector)
+{
+ struct mcde_dsi *d = connector_to_mcde_dsi(connector);
+
+ /* Just pass the question to the panel */
+ if (d->panel)
+ return drm_panel_get_modes(d->panel);
+
+ /* TODO: deal with bridges */
+
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs
+mcde_dsi_connector_helper_funcs = {
+ .get_modes = mcde_dsi_get_modes,
+};
+
+static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
+{
+ struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
+ struct drm_device *drm = bridge->dev;
+ int ret;
+
+ drm_connector_helper_add(&d->connector,
+ &mcde_dsi_connector_helper_funcs);
+
+ if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) {
+ dev_err(d->dev, "we need atomic updates\n");
+ return -ENOTSUPP;
+ }
+
+ ret = drm_connector_init(drm, &d->connector,
+ &mcde_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret) {
+ dev_err(d->dev, "failed to initialize DSI bridge connector\n");
+ return ret;
+ }
+ d->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
+ /* The encoder in the bridge attached to the DSI bridge */
+ drm_connector_attach_encoder(&d->connector, bridge->encoder);
+ /* Then we attach the DSI bridge to the output (panel etc) bridge */
+ ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge);
+ if (ret) {
+ dev_err(d->dev, "failed to attach the DSI bridge\n");
+ return ret;
+ }
+ d->connector.status = connector_status_connected;
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
+ .attach = mcde_dsi_bridge_attach,
+ .mode_set = mcde_dsi_bridge_mode_set,
+ .disable = mcde_dsi_bridge_disable,
+ .enable = mcde_dsi_bridge_enable,
+};
+
+static int mcde_dsi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = data;
+ struct mcde *mcde = drm->dev_private;
+ struct mcde_dsi *d = dev_get_drvdata(dev);
+ struct device_node *child;
+ struct drm_panel *panel = NULL;
+ struct drm_bridge *bridge = NULL;
+
+ if (!of_get_available_child_count(dev->of_node)) {
+ dev_info(dev, "unused DSI interface\n");
+ d->unused = true;
+ return 0;
+ }
+ d->mcde = mcde;
+ /* If the display attached before binding, set this up */
+ if (d->mdsi)
+ d->mcde->mdsi = d->mdsi;
+
+ /* Obtain the clocks */
+ d->hs_clk = devm_clk_get(dev, "hs");
+ if (IS_ERR(d->hs_clk)) {
+ dev_err(dev, "unable to get HS clock\n");
+ return PTR_ERR(d->hs_clk);
+ }
+
+ d->lp_clk = devm_clk_get(dev, "lp");
+ if (IS_ERR(d->lp_clk)) {
+ dev_err(dev, "unable to get LP clock\n");
+ return PTR_ERR(d->lp_clk);
+ }
+
+ /* Assert RESET through the PRCMU, active low */
+ /* FIXME: which DSI block? */
+ regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
+ PRCM_DSI_SW_RESET_DSI0_SW_RESETN, 0);
+
+ usleep_range(100, 200);
+
+ /* De-assert RESET again */
+ regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
+ PRCM_DSI_SW_RESET_DSI0_SW_RESETN,
+ PRCM_DSI_SW_RESET_DSI0_SW_RESETN);
+
+ /* Start up the hardware */
+ mcde_dsi_start(d);
+
+ /* Look for a panel as a child to this node */
+ for_each_available_child_of_node(dev->of_node, child) {
+ panel = of_drm_find_panel(child);
+ if (IS_ERR(panel)) {
+ dev_err(dev, "failed to find panel try bridge (%lu)\n",
+ PTR_ERR(panel));
+ bridge = of_drm_find_bridge(child);
+ if (IS_ERR(bridge)) {
+ dev_err(dev, "failed to find bridge (%lu)\n",
+ PTR_ERR(bridge));
+ return PTR_ERR(bridge);
+ }
+ }
+ }
+ if (panel) {
+ bridge = drm_panel_bridge_add(panel,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge)) {
+ dev_err(dev, "error adding panel bridge\n");
+ return PTR_ERR(bridge);
+ }
+ dev_info(dev, "connected to panel\n");
+ d->panel = panel;
+ } else if (bridge) {
+ /* TODO: AV8100 HDMI encoder goes here for example */
+ dev_info(dev, "connected to non-panel bridge (unsupported)\n");
+ return -ENODEV;
+ } else {
+ dev_err(dev, "no panel or bridge\n");
+ return -ENODEV;
+ }
+
+ d->bridge_out = bridge;
+
+ /* Create a bridge for this DSI channel */
+ d->bridge.funcs = &mcde_dsi_bridge_funcs;
+ d->bridge.of_node = dev->of_node;
+ drm_bridge_add(&d->bridge);
+
+ /* TODO: first come first serve, use a list */
+ mcde->bridge = &d->bridge;
+
+ dev_info(dev, "initialized MCDE DSI bridge\n");
+
+ return 0;
+}
+
+static void mcde_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mcde_dsi *d = dev_get_drvdata(dev);
+
+ if (d->panel)
+ drm_panel_bridge_remove(d->bridge_out);
+ regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
+ PRCM_DSI_SW_RESET_DSI0_SW_RESETN, 0);
+}
+
+static const struct component_ops mcde_dsi_component_ops = {
+ .bind = mcde_dsi_bind,
+ .unbind = mcde_dsi_unbind,
+};
+
+static int mcde_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mcde_dsi *d;
+ struct mipi_dsi_host *host;
+ struct resource *res;
+ u32 dsi_id;
+ int ret;
+
+ d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ d->dev = dev;
+ platform_set_drvdata(pdev, d);
+
+ /* Get a handle on the PRCMU so we can do reset */
+ d->prcmu =
+ syscon_regmap_lookup_by_compatible("stericsson,db8500-prcmu");
+ if (IS_ERR(d->prcmu)) {
+ dev_err(dev, "no PRCMU regmap\n");
+ return PTR_ERR(d->prcmu);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ d->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(d->regs)) {
+ dev_err(dev, "no DSI regs\n");
+ return PTR_ERR(d->regs);
+ }
+
+ dsi_id = readl(d->regs + DSI_ID_REG);
+ dev_info(dev, "HW revision 0x%08x\n", dsi_id);
+
+ host = &d->dsi_host;
+ host->dev = dev;
+ host->ops = &mcde_dsi_host_ops;
+ ret = mipi_dsi_host_register(host);
+ if (ret < 0) {
+ dev_err(dev, "failed to register DSI host: %d\n", ret);
+ return ret;
+ }
+ dev_info(dev, "registered DSI host\n");
+
+ return component_add(dev, &mcde_dsi_component_ops);
+}
+
+static int mcde_dsi_remove(struct platform_device *pdev)
+{
+ struct mcde_dsi *d = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &mcde_dsi_component_ops);
+ mipi_dsi_host_unregister(&d->dsi_host);
+
+ return 0;
+}
+
+static const struct of_device_id mcde_dsi_of_match[] = {
+ {
+ .compatible = "ste,mcde-dsi",
+ },
+ {},
+};
+
+struct platform_driver mcde_dsi_driver = {
+ .driver = {
+ .name = "mcde-dsi",
+ .of_match_table = of_match_ptr(mcde_dsi_of_match),
+ },
+ .probe = mcde_dsi_probe,
+ .remove = mcde_dsi_remove,
+};
diff --git a/drivers/gpu/drm/mcde/mcde_dsi_regs.h b/drivers/gpu/drm/mcde/mcde_dsi_regs.h
new file mode 100644
index 000000000000..c9253321a3be
--- /dev/null
+++ b/drivers/gpu/drm/mcde/mcde_dsi_regs.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRM_MCDE_DSI_REGS
+#define __DRM_MCDE_DSI_REGS
+
+#define DSI_MCTL_INTEGRATION_MODE 0x00000000
+
+#define DSI_MCTL_MAIN_DATA_CTL 0x00000004
+#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN BIT(0)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE BIT(1)
+#define DSI_MCTL_MAIN_DATA_CTL_VID_EN BIT(2)
+#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL BIT(3)
+#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL BIT(4)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN BIT(5)
+#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN BIT(6)
+#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN BIT(7)
+#define DSI_MCTL_MAIN_DATA_CTL_READ_EN BIT(8)
+#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN BIT(9)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC BIT(10)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM BIT(11)
+#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN BIT(12)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN BIT(13)
+#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN BIT(14)
+#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN BIT(15)
+
+#define DSI_MCTL_MAIN_PHY_CTL 0x00000008
+#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN BIT(0)
+#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE BIT(1)
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS BIT(2)
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN BIT(3)
+#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN BIT(4)
+#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN BIT(5)
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_SHIFT 6
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_MASK 0x000003C0
+#define DSI_MCTL_MAIN_PHY_CTL_CLOCK_FORCE_STOP_MODE BIT(10)
+
+#define DSI_MCTL_PLL_CTL 0x0000000C
+#define DSI_MCTL_LANE_STS 0x00000010
+
+#define DSI_MCTL_DPHY_TIMEOUT 0x00000014
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_SHIFT 0
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_MASK 0x0000000F
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_SHIFT 4
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_MASK 0x0003FFF0
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_SHIFT 18
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_MASK 0xFFFC0000
+
+#define DSI_MCTL_ULPOUT_TIME 0x00000018
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_SHIFT 0
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_MASK 0x000001FF
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_SHIFT 9
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_MASK 0x0003FE00
+
+#define DSI_MCTL_DPHY_STATIC 0x0000001C
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK BIT(0)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK BIT(1)
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1 BIT(2)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1 BIT(3)
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2 BIT(4)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2 BIT(5)
+#define DSI_MCTL_DPHY_STATIC_UI_X4_SHIFT 6
+#define DSI_MCTL_DPHY_STATIC_UI_X4_MASK 0x00000FC0
+
+#define DSI_MCTL_MAIN_EN 0x00000020
+#define DSI_MCTL_MAIN_EN_PLL_START BIT(0)
+#define DSI_MCTL_MAIN_EN_CKLANE_EN BIT(3)
+#define DSI_MCTL_MAIN_EN_DAT1_EN BIT(4)
+#define DSI_MCTL_MAIN_EN_DAT2_EN BIT(5)
+#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ BIT(6)
+#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ BIT(7)
+#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ BIT(8)
+#define DSI_MCTL_MAIN_EN_IF1_EN BIT(9)
+#define DSI_MCTL_MAIN_EN_IF2_EN BIT(10)
+
+#define DSI_MCTL_MAIN_STS 0x00000024
+#define DSI_MCTL_MAIN_STS_PLL_LOCK BIT(0)
+#define DSI_MCTL_MAIN_STS_CLKLANE_READY BIT(1)
+#define DSI_MCTL_MAIN_STS_DAT1_READY BIT(2)
+#define DSI_MCTL_MAIN_STS_DAT2_READY BIT(3)
+#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR BIT(4)
+#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR BIT(5)
+#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK BIT(6)
+#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK BIT(7)
+
+#define DSI_MCTL_DPHY_ERR 0x00000028
+#define DSI_INT_VID_RDDATA 0x00000030
+#define DSI_INT_VID_GNT 0x00000034
+#define DSI_INT_CMD_RDDATA 0x00000038
+#define DSI_INT_CMD_GNT 0x0000003C
+#define DSI_INT_INTERRUPT_CTL 0x00000040
+
+#define DSI_CMD_MODE_CTL 0x00000050
+#define DSI_CMD_MODE_CTL_IF1_ID_SHIFT 0
+#define DSI_CMD_MODE_CTL_IF1_ID_MASK 0x00000003
+#define DSI_CMD_MODE_CTL_IF2_ID_SHIFT 2
+#define DSI_CMD_MODE_CTL_IF2_ID_MASK 0x0000000C
+#define DSI_CMD_MODE_CTL_IF1_LP_EN BIT(4)
+#define DSI_CMD_MODE_CTL_IF2_LP_EN BIT(5)
+#define DSI_CMD_MODE_CTL_ARB_MODE BIT(6)
+#define DSI_CMD_MODE_CTL_ARB_PRI BIT(7)
+#define DSI_CMD_MODE_CTL_FIL_VALUE_SHIFT 8
+#define DSI_CMD_MODE_CTL_FIL_VALUE_MASK 0x0000FF00
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT_SHIFT 16
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT_MASK 0x03FF0000
+
+#define DSI_CMD_MODE_STS 0x00000054
+#define DSI_CMD_MODE_STS_ERR_NO_TE BIT(0)
+#define DSI_CMD_MODE_STS_ERR_TE_MISS BIT(1)
+#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN BIT(2)
+#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN BIT(3)
+#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD BIT(4)
+#define DSI_CMD_MODE_STS_CSM_RUNNING BIT(5)
+
+#define DSI_DIRECT_CMD_SEND 0x00000060
+
+#define DSI_DIRECT_CMD_MAIN_SETTINGS 0x00000064
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_SHIFT 0
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_MASK 0x00000007
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_WRITE 0
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_READ 1
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TE_REQ 4
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TRIG_REQ 5
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_BTA_REQ 6
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT BIT(3)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT 8
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_MASK 0x00003F00
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_TURN_ON_PERIPHERAL 50
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHUT_DOWN_PERIPHERAL 34
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_0 3
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_1 19
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_2 35
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_LONG_WRITE 41
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_0 5
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 21
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_LONG_WRITE 57
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_READ 6
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SET_MAX_PKT_SIZE 55
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT 14
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT 16
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN BIT(21)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL_SHIFT 24
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL_MASK 0x0F000000
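+/*
+ * Note: the CMD_HEAD values above are the standard MIPI DSI data types
+ * in decimal, e.g. DCS_SHORT_WRITE_1 = 21 = 0x15
+ * (MIPI_DSI_DCS_SHORT_WRITE_PARAM) and DCS_LONG_WRITE = 57 = 0x39
+ * (MIPI_DSI_DCS_LONG_WRITE).
+ */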
+
+#define DSI_DIRECT_CMD_STS 0x00000068
+#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION BIT(0)
+#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED BIT(1)
+#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED BIT(2)
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED BIT(3)
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED BIT(4)
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED BIT(5)
+#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED BIT(6)
+#define DSI_DIRECT_CMD_STS_TE_RECEIVED BIT(7)
+#define DSI_DIRECT_CMD_STS_BTA_COMPLETED BIT(8)
+#define DSI_DIRECT_CMD_STS_BTA_FINISHED BIT(9)
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR BIT(10)
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL_MASK 0x00007800
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL_SHIFT 11
+#define DSI_DIRECT_CMD_STS_ACK_VAL_SHIFT 16
+#define DSI_DIRECT_CMD_STS_ACK_VAL_MASK 0xFFFF0000
+
+#define DSI_DIRECT_CMD_RD_INIT 0x0000006C
+#define DSI_DIRECT_CMD_RD_INIT_RESET_SHIFT 0
+#define DSI_DIRECT_CMD_RD_INIT_RESET_MASK 0xFFFFFFFF
+
+#define DSI_DIRECT_CMD_WRDAT0 0x00000070
+#define DSI_DIRECT_CMD_WRDAT1 0x00000074
+#define DSI_DIRECT_CMD_WRDAT2 0x00000078
+#define DSI_DIRECT_CMD_WRDAT3 0x0000007C
+
+#define DSI_DIRECT_CMD_RDDAT 0x00000080
+
+#define DSI_DIRECT_CMD_RD_PROPERTY 0x00000084
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_SHIFT 0
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_MASK 0x0000FFFF
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID_SHIFT 16
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID_MASK 0x00030000
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC_SHIFT 18
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC_MASK 0x00040000
+
+#define DSI_DIRECT_CMD_RD_STS 0x00000088
+
+#define DSI_VID_MAIN_CTL 0x00000090
+#define DSI_VID_MAIN_CTL_START_MODE_SHIFT 0
+#define DSI_VID_MAIN_CTL_START_MODE_MASK 0x00000003
+#define DSI_VID_MAIN_CTL_STOP_MODE_SHIFT 2
+#define DSI_VID_MAIN_CTL_STOP_MODE_MASK 0x0000000C
+#define DSI_VID_MAIN_CTL_VID_ID_SHIFT 4
+#define DSI_VID_MAIN_CTL_VID_ID_MASK 0x00000030
+#define DSI_VID_MAIN_CTL_HEADER_SHIFT 6
+#define DSI_VID_MAIN_CTL_HEADER_MASK 0x00000FC0
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_16BITS 0
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS BIT(12)
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS_LOOSE BIT(13)
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_24BITS (BIT(12) | BIT(13))
+#define DSI_VID_MAIN_CTL_BURST_MODE BIT(14)
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE BIT(15)
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL BIT(16)
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_NULL 0
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_BLANKING BIT(17)
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_0 BIT(18)
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_1 (BIT(17) | BIT(18))
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_NULL 0
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_BLANKING BIT(19)
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 BIT(20)
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_1 (BIT(19) | BIT(20))
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE_SHIFT 21
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE_MASK 0x00600000
+
+#define DSI_VID_VSIZE 0x00000094
+#define DSI_VID_VSIZE_VSA_LENGTH_SHIFT 0
+#define DSI_VID_VSIZE_VSA_LENGTH_MASK 0x0000003F
+#define DSI_VID_VSIZE_VBP_LENGTH_SHIFT 6
+#define DSI_VID_VSIZE_VBP_LENGTH_MASK 0x00000FC0
+#define DSI_VID_VSIZE_VFP_LENGTH_SHIFT 12
+#define DSI_VID_VSIZE_VFP_LENGTH_MASK 0x000FF000
+#define DSI_VID_VSIZE_VACT_LENGTH_SHIFT 20
+#define DSI_VID_VSIZE_VACT_LENGTH_MASK 0x7FF00000
+
+#define DSI_VID_HSIZE1 0x00000098
+#define DSI_VID_HSIZE1_HSA_LENGTH_SHIFT 0
+#define DSI_VID_HSIZE1_HSA_LENGTH_MASK 0x000003FF
+#define DSI_VID_HSIZE1_HBP_LENGTH_SHIFT 10
+#define DSI_VID_HSIZE1_HBP_LENGTH_MASK 0x000FFC00
+#define DSI_VID_HSIZE1_HFP_LENGTH_SHIFT 20
+#define DSI_VID_HSIZE1_HFP_LENGTH_MASK 0x7FF00000
+
+#define DSI_VID_HSIZE2 0x0000009C
+#define DSI_VID_HSIZE2_RGB_SIZE_SHIFT 0
+#define DSI_VID_HSIZE2_RGB_SIZE_MASK 0x00001FFF
+
+#define DSI_VID_BLKSIZE1 0x000000A0
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_SHIFT 0
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_MASK 0x00001FFF
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK_SHIFT 13
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK_MASK 0x03FFE000
+
+#define DSI_VID_BLKSIZE2 0x000000A4
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_SHIFT 0
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_MASK 0x00001FFF
+
+#define DSI_VID_PCK_TIME 0x000000A8
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT 0
+
+#define DSI_VID_DPHY_TIME 0x000000AC
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT 0
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_MASK 0x00001FFF
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT 13
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_MASK 0x00FFE000
+
+#define DSI_VID_MODE_STS 0x000000BC
+#define DSI_VID_MODE_STS_VSG_RUNNING BIT(0)
+
+#define DSI_VID_VCA_SETTING1 0x000000C0
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT 0
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_MASK 0x0000FFFF
+#define DSI_VID_VCA_SETTING1_BURST_LP BIT(16)
+
+#define DSI_VID_VCA_SETTING2 0x000000C4
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT 0
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_MASK 0x0000FFFF
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_SHIFT 16
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_MASK 0xFFFF0000
+
+#define DSI_CMD_MODE_STS_CTL 0x000000F4
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN BIT(0)
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN BIT(1)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN BIT(2)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN BIT(3)
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN BIT(4)
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN BIT(5)
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE BIT(16)
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE BIT(17)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE BIT(18)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE BIT(19)
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE BIT(20)
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE BIT(21)
+
+#define DSI_DIRECT_CMD_STS_CTL 0x000000F8
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN BIT(0)
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN BIT(1)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN BIT(2)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN BIT(3)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN BIT(4)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN BIT(5)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN BIT(6)
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN BIT(7)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN BIT(8)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN BIT(9)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN BIT(10)
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE BIT(16)
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE BIT(17)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE BIT(18)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE BIT(19)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE BIT(20)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE BIT(21)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE BIT(22)
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE BIT(23)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE BIT(24)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE BIT(25)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE BIT(26)
+
+#define DSI_VID_MODE_STS_CTL 0x00000100
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING BIT(0)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA BIT(1)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC BIT(2)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC BIT(3)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH BIT(4)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT BIT(5)
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE BIT(6)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE BIT(7)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD BIT(8)
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH BIT(9)
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE BIT(16)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE BIT(17)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE BIT(18)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE BIT(19)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE BIT(20)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE BIT(21)
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE BIT(22)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE BIT(23)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE BIT(24)
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE BIT(25)
+#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE BIT(26)
+
+#define DSI_TG_STS_CTL 0x00000104
+#define DSI_MCTL_DPHY_ERR_CTL 0x00000108
+#define DSI_MCTL_MAIN_STS_CLR 0x00000110
+
+#define DSI_CMD_MODE_STS_CLR 0x00000114
+#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR BIT(0)
+#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR BIT(1)
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR BIT(2)
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR BIT(3)
+#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR BIT(4)
+#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR BIT(5)
+
+#define DSI_DIRECT_CMD_STS_CLR 0x00000118
+#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR BIT(0)
+#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR BIT(1)
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR BIT(2)
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR BIT(3)
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR BIT(4)
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR BIT(5)
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR BIT(6)
+#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR BIT(7)
+#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR BIT(8)
+#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR BIT(9)
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR BIT(10)
+
+#define DSI_DIRECT_CMD_RD_STS_CLR 0x0000011C
+#define DSI_VID_MODE_STS_CLR 0x00000120
+#define DSI_TG_STS_CLR 0x00000124
+#define DSI_MCTL_DPHY_ERR_CLR 0x00000128
+#define DSI_MCTL_MAIN_STS_FLAG 0x00000130
+#define DSI_CMD_MODE_STS_FLAG 0x00000134
+#define DSI_DIRECT_CMD_STS_FLAG 0x00000138
+#define DSI_DIRECT_CMD_RD_STS_FLAG 0x0000013C
+#define DSI_VID_MODE_STS_FLAG 0x00000140
+#define DSI_TG_STS_FLAG 0x00000144
+
+#define DSI_DPHY_LANES_TRIM 0x00000150
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1_SHIFT 0
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1_MASK 0x00000003
+#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1 BIT(2)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1 BIT(3)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1 BIT(4)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1 BIT(5)
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK_SHIFT 6
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK_MASK 0x000000C0
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK_SHIFT 8
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK_MASK 0x00000300
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK_SHIFT 10
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK_MASK 0x00000C00
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_81 0
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_90 BIT(12)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK BIT(13)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK BIT(14)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK BIT(15)
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2_SHIFT 16
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2_MASK 0x00030000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2 BIT(18)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2 BIT(19)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2 BIT(20)
+
+#define DSI_ID_REG 0x00000FF0
+
+#endif /* __DRM_MCDE_DSI_REGS */
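
For orientation, and not part of the patch itself: register fields declared
as SHIFT/MASK pairs like the ones above are consumed with the usual
read-modify-write idiom. A minimal sketch, assuming an ioremapped base
pointer for the DSI block and using only the DSI_VID_VSIZE fields defined
above (dsi_set_vsize() is a hypothetical helper, not from the patch):

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper: program the vertical timing fields of
 * DSI_VID_VSIZE. Masking after the shift keeps an out-of-range
 * value from spilling into the neighbouring field.
 */
static void dsi_set_vsize(void __iomem *base, u32 vsa, u32 vbp,
			  u32 vfp, u32 vact)
{
	u32 val = readl(base + DSI_VID_VSIZE);

	val &= ~(DSI_VID_VSIZE_VSA_LENGTH_MASK |
		 DSI_VID_VSIZE_VBP_LENGTH_MASK |
		 DSI_VID_VSIZE_VFP_LENGTH_MASK |
		 DSI_VID_VSIZE_VACT_LENGTH_MASK);
	val |= (vsa << DSI_VID_VSIZE_VSA_LENGTH_SHIFT) &
	       DSI_VID_VSIZE_VSA_LENGTH_MASK;
	val |= (vbp << DSI_VID_VSIZE_VBP_LENGTH_SHIFT) &
	       DSI_VID_VSIZE_VBP_LENGTH_MASK;
	val |= (vfp << DSI_VID_VSIZE_VFP_LENGTH_SHIFT) &
	       DSI_VID_VSIZE_VFP_LENGTH_MASK;
	val |= (vact << DSI_VID_VSIZE_VACT_LENGTH_SHIFT) &
	       DSI_VID_VSIZE_VACT_LENGTH_MASK;
	writel(val, base + DSI_VID_VSIZE);
}
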
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index 44afa912da1f..4c3ad7de2d3b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -24,10 +24,11 @@ static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode,
struct drm_gem_object *obj)
{
+ const struct drm_format_info *info = drm_get_format_info(dev, mode);
struct drm_framebuffer *fb;
int ret;
- if (drm_format_num_planes(mode->pixel_format) != 1)
+ if (info->num_planes != 1)
return ERR_PTR(-EINVAL);
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
@@ -80,6 +81,7 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
{
+ const struct drm_format_info *info = drm_get_format_info(dev, cmd);
struct drm_framebuffer *fb;
struct drm_gem_object *gem;
unsigned int width = cmd->width;
@@ -87,14 +89,14 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
unsigned int size, bpp;
int ret;
- if (drm_format_num_planes(cmd->pixel_format) != 1)
+ if (info->num_planes != 1)
return ERR_PTR(-EINVAL);
gem = drm_gem_object_lookup(file, cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
- bpp = drm_format_plane_cpp(cmd->pixel_format, 0);
+ bpp = info->cpp[0];
size = (height - 1) * cmd->pitches[0] + width * bpp;
size += cmd->offsets[0];
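
The mediatek change is representative of the drm_format_*() migration in
this series: the format descriptor is resolved once with
drm_get_format_info() and its num_planes/cpp fields are read directly,
instead of being re-derived on every call. A sketch of the equivalent
single-plane size check, where fb_min_size() is a hypothetical name for
the computation the driver performs inline:

#include <linux/types.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>

/* Sketch: smallest buffer that can back a single-plane framebuffer
 * described by @cmd; matches the inline computation above.
 */
static u32 fb_min_size(const struct drm_format_info *info,
		       const struct drm_mode_fb_cmd2 *cmd)
{
	u32 bpp = info->cpp[0]; /* bytes per pixel, plane 0 */

	return (cmd->height - 1) * cmd->pitches[0] +
	       cmd->width * bpp + cmd->offsets[0];
}
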
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 562cd6113f28..5d6a9f094df5 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -333,6 +333,9 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
ctrl_frame_en = VS_EN;
ctrl_reg = GRL_ACP_ISRC_CTRL;
break;
+ default:
+ dev_err(hdmi->dev, "Unknown infoframe type %d\n", frame_type);
+ return;
}
mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index e450387d0eab..9f9281dd49f8 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -15,3 +15,4 @@ config DRM_MESON_DW_HDMI
depends on DRM_MESON
default y if DRM_MESON
select DRM_DW_HDMI
+ imply DRM_DW_HDMI_I2S_AUDIO
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index bdbf925ff3e8..cc7c6ae3013d 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -458,7 +458,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
}
/* Update Canvas with buffer address */
- priv->viu.vd1_planes = drm_format_num_planes(fb->format->format);
+ priv->viu.vd1_planes = fb->format->num_planes;
switch (priv->viu.vd1_planes) {
case 3:
@@ -466,8 +466,8 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
priv->viu.vd1_addr2 = gem->paddr + fb->offsets[2];
priv->viu.vd1_stride2 = fb->pitches[2];
priv->viu.vd1_height2 =
- drm_format_plane_height(fb->height,
- fb->format->format, 2);
+ drm_format_info_plane_height(fb->format,
+ fb->height, 2);
DRM_DEBUG("plane 2 addr 0x%x stride %d height %d\n",
priv->viu.vd1_addr2,
priv->viu.vd1_stride2,
@@ -478,8 +478,8 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
priv->viu.vd1_stride1 = fb->pitches[1];
priv->viu.vd1_height1 =
- drm_format_plane_height(fb->height,
- fb->format->format, 1);
+ drm_format_info_plane_height(fb->format,
+ fb->height, 1);
DRM_DEBUG("plane 1 addr 0x%x stride %d height %d\n",
priv->viu.vd1_addr1,
priv->viu.vd1_stride1,
@@ -490,8 +490,8 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
priv->viu.vd1_stride0 = fb->pitches[0];
priv->viu.vd1_height0 =
- drm_format_plane_height(fb->height,
- fb->format->format, 0);
+ drm_format_info_plane_height(fb->format,
+ fb->height, 0);
DRM_DEBUG("plane 0 addr 0x%x stride %d height %d\n",
priv->viu.vd1_addr0,
priv->viu.vd1_stride0,
@@ -578,6 +578,9 @@ int meson_overlay_create(struct meson_drm *priv)
drm_plane_helper_add(plane, &meson_overlay_helper_funcs);
+ /* For now, VD Overlay plane is always on the back */
+ drm_plane_create_zpos_immutable_property(plane, 0);
+
priv->overlay_plane = plane;
DRM_DEBUG_DRIVER("\n");
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index d90427b93a51..7a7e88dadd0b 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -153,6 +153,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
OSD_COLOR_MATRIX_32_ARGB;
break;
+ case DRM_FORMAT_XBGR8888:
+ /* For XBGR, replace the pixel's alpha by 0xFF */
+ writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ABGR;
+ break;
case DRM_FORMAT_ARGB8888:
/* For ARGB, use the pixel's alpha */
writel_bits_relaxed(OSD_REPLACE_EN, 0,
@@ -160,6 +167,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
OSD_COLOR_MATRIX_32_ARGB;
break;
+ case DRM_FORMAT_ABGR8888:
+ /* For ABGR, use the pixel's alpha */
+ writel_bits_relaxed(OSD_REPLACE_EN, 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ABGR;
+ break;
case DRM_FORMAT_RGB888:
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
OSD_COLOR_MATRIX_24_RGB;
@@ -346,7 +360,9 @@ static const struct drm_plane_funcs meson_plane_funcs = {
static const uint32_t supported_drm_formats[] = {
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
};
@@ -373,6 +389,9 @@ int meson_plane_create(struct meson_drm *priv)
drm_plane_helper_add(plane, &meson_plane_helper_funcs);
+ /* For now, OSD Primary plane is always on the front */
+ drm_plane_create_zpos_immutable_property(plane, 1);
+
priv->primary_plane = plane;
return 0;
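
Both meson planes now publish a fixed stacking position through an
immutable zpos property; lower values are composited further back, so the
video overlay (zpos 0) always sits behind the OSD primary plane (zpos 1).
A sketch of the same two-plane arrangement, with example_init_plane_zpos()
as a hypothetical helper:

#include <drm/drm_blend.h>
#include <drm/drm_plane.h>

/* Sketch: advertise a fixed back-to-front plane order to userspace. */
static int example_init_plane_zpos(struct drm_plane *overlay,
				   struct drm_plane *primary)
{
	int ret;

	/* overlay at the back ... */
	ret = drm_plane_create_zpos_immutable_property(overlay, 0);
	if (ret)
		return ret;
	/* ... primary on top */
	return drm_plane_create_zpos_immutable_property(primary, 1);
}
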
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 91f3579546d0..76fee0fbdcae 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -3,7 +3,7 @@ config DRM_MGAG200
tristate "Kernel modesetting driver for MGA G200 server engines"
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_VRAM_HELPER
help
This is a KMS driver for the MGA G200 server chips, it
does not support the original MGA G200 or any of the desktop
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 853387b0fa88..f0c61a92351c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -19,10 +19,9 @@ static void mga_hide_cursor(struct mga_device *mdev)
{
WREG8(MGA_CURPOSXL, 0);
WREG8(MGA_CURPOSXH, 0);
- if (mdev->cursor.pixels_1->pin_count)
- mgag200_bo_unpin(mdev->cursor.pixels_1);
- if (mdev->cursor.pixels_2->pin_count)
- mgag200_bo_unpin(mdev->cursor.pixels_2);
+ if (mdev->cursor.pixels_current)
+ drm_gem_vram_unpin(mdev->cursor.pixels_current);
+ mdev->cursor.pixels_current = NULL;
}
int mga_crtc_cursor_set(struct drm_crtc *crtc,
@@ -33,13 +32,14 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = (struct mga_device *)dev->dev_private;
- struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1;
- struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2;
- struct mgag200_bo *pixels_current = mdev->cursor.pixels_current;
- struct mgag200_bo *pixels_prev = mdev->cursor.pixels_prev;
+ struct drm_gem_vram_object *pixels_1 = mdev->cursor.pixels_1;
+ struct drm_gem_vram_object *pixels_2 = mdev->cursor.pixels_2;
+ struct drm_gem_vram_object *pixels_current = mdev->cursor.pixels_current;
+ struct drm_gem_vram_object *pixels_next;
struct drm_gem_object *obj;
- struct mgag200_bo *bo = NULL;
+ struct drm_gem_vram_object *gbo = NULL;
int ret = 0;
+ u8 *src, *dst;
unsigned int i, row, col;
uint32_t colour_set[16];
uint32_t *next_space = &colour_set[0];
@@ -47,7 +47,8 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t this_colour;
bool found = false;
int colour_count = 0;
- u64 gpu_addr;
+ s64 gpu_addr;
+ u64 dst_gpu;
u8 reg_index;
u8 this_row[48];
@@ -57,73 +58,71 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
return -ENOTSUPP; /* Didn't allocate space for cursors */
}
- if ((width != 64 || height != 64) && handle) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -EINVAL;
+ if (WARN_ON(pixels_current &&
+ pixels_1 != pixels_current &&
+ pixels_2 != pixels_current)) {
+ return -ENOTSUPP; /* inconsistent state */
}
- BUG_ON(pixels_1 != pixels_current && pixels_1 != pixels_prev);
- BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
- BUG_ON(pixels_current == pixels_prev);
-
if (!handle || !file_priv) {
mga_hide_cursor(mdev);
return 0;
}
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj)
- return -ENOENT;
-
- ret = mgag200_bo_reserve(pixels_1, true);
- if (ret) {
+ if (width != 64 || height != 64) {
WREG8(MGA_CURPOSXL, 0);
WREG8(MGA_CURPOSXH, 0);
- goto out_unref;
- }
- ret = mgag200_bo_reserve(pixels_2, true);
- if (ret) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- mgag200_bo_unreserve(pixels_1);
- goto out_unreserve1;
+ return -EINVAL;
}
- /* Move cursor buffers into VRAM if they aren't already */
- if (!pixels_1->pin_count) {
- ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
- &mdev->cursor.pixels_1_gpu_addr);
- if (ret)
- goto out1;
+ if (pixels_current == pixels_1)
+ pixels_next = pixels_2;
+ else
+ pixels_next = pixels_1;
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+ gbo = drm_gem_vram_of_gem(obj);
+ ret = drm_gem_vram_pin(gbo, 0);
+ if (ret) {
+ dev_err(&dev->pdev->dev, "failed to lock user bo\n");
+ goto err_drm_gem_object_put_unlocked;
}
- if (!pixels_2->pin_count) {
- ret = mgag200_bo_pin(pixels_2, TTM_PL_FLAG_VRAM,
- &mdev->cursor.pixels_2_gpu_addr);
- if (ret) {
- mgag200_bo_unpin(pixels_1);
- goto out1;
- }
+ src = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ dev_err(&dev->pdev->dev,
+ "failed to kmap user buffer updates\n");
+ goto err_drm_gem_vram_unpin_src;
}
- bo = gem_to_mga_bo(obj);
- ret = mgag200_bo_reserve(bo, true);
- if (ret) {
- dev_err(&dev->pdev->dev, "failed to reserve user bo\n");
- goto out1;
+ /* Pin and map up-coming buffer to write colour indices */
+ ret = drm_gem_vram_pin(pixels_next, 0);
+ if (ret) {
+ dev_err(&dev->pdev->dev,
+ "failed to pin cursor buffer: %d\n", ret);
+ goto err_drm_gem_vram_kunmap_src;
+ }
+ dst = drm_gem_vram_kmap(pixels_next, true, NULL);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ dev_err(&dev->pdev->dev,
+ "failed to kmap cursor updates: %d\n", ret);
+ goto err_drm_gem_vram_unpin_dst;
}
- if (!bo->kmap.virtual) {
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret) {
- dev_err(&dev->pdev->dev, "failed to kmap user buffer updates\n");
- goto out2;
- }
+ gpu_addr = drm_gem_vram_offset(pixels_next);
+ if (gpu_addr < 0) {
+ ret = (int)gpu_addr;
+ dev_err(&dev->pdev->dev,
+ "failed to get cursor scanout address: %d\n", ret);
+ goto err_drm_gem_vram_kunmap_dst;
}
+ dst_gpu = (u64)gpu_addr;
memset(&colour_set[0], 0, sizeof(uint32_t)*16);
/* width*height*4 = 16384 */
for (i = 0; i < 16384; i += 4) {
- this_colour = ioread32(bo->kmap.virtual + i);
+ this_colour = ioread32(src + i);
/* No transparency */
if (this_colour>>24 != 0xff &&
this_colour>>24 != 0x0) {
@@ -133,7 +132,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
warn_transparent = false; /* Only tell the user once. */
}
ret = -EINVAL;
- goto out3;
+ goto err_drm_gem_vram_kunmap_dst;
}
/* Don't need to store transparent pixels as colours */
if (this_colour>>24 == 0x0)
@@ -155,7 +154,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
warn_palette = false; /* Only tell the user once. */
}
ret = -EINVAL;
- goto out3;
+ goto err_drm_gem_vram_kunmap_dst;
}
*next_space = this_colour;
next_space++;
@@ -174,22 +173,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
BUG_ON((colour_set[i]>>24 & 0xff) != 0xff);
}
- /* Map up-coming buffer to write colour indices */
- if (!pixels_prev->kmap.virtual) {
- ret = ttm_bo_kmap(&pixels_prev->bo, 0,
- pixels_prev->bo.num_pages,
- &pixels_prev->kmap);
- if (ret) {
- dev_err(&dev->pdev->dev, "failed to kmap cursor updates\n");
- goto out3;
- }
- }
-
/* now write colour indices into hardware cursor buffer */
for (row = 0; row < 64; row++) {
memset(&this_row[0], 0, 48);
for (col = 0; col < 64; col++) {
- this_colour = ioread32(bo->kmap.virtual + 4*(col + 64*row));
+ this_colour = ioread32(src + 4*(col + 64*row));
/* write transparent pixels */
if (this_colour>>24 == 0x0) {
this_row[47 - col/8] |= 0x80>>(col%8);
@@ -207,46 +195,39 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
}
}
}
- memcpy_toio(pixels_prev->kmap.virtual + row*48, &this_row[0], 48);
+ memcpy_toio(dst + row*48, &this_row[0], 48);
}
/* Program gpu address of cursor buffer */
- if (pixels_prev == pixels_1)
- gpu_addr = mdev->cursor.pixels_1_gpu_addr;
- else
- gpu_addr = mdev->cursor.pixels_2_gpu_addr;
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, (u8)((gpu_addr>>10) & 0xff));
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, (u8)((gpu_addr>>18) & 0x3f));
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, (u8)((dst_gpu>>10) & 0xff));
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, (u8)((dst_gpu>>18) & 0x3f));
/* Adjust cursor control register to turn on the cursor */
WREG_DAC(MGA1064_CURSOR_CTL, 4); /* 16-colour palletized cursor mode */
- /* Now swap internal buffer pointers */
- if (mdev->cursor.pixels_1 == mdev->cursor.pixels_prev) {
- mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
- mdev->cursor.pixels_current = mdev->cursor.pixels_1;
- } else if (mdev->cursor.pixels_1 == mdev->cursor.pixels_current) {
- mdev->cursor.pixels_prev = mdev->cursor.pixels_1;
- mdev->cursor.pixels_current = mdev->cursor.pixels_2;
- } else {
- BUG();
- }
- ret = 0;
+ /* Now update internal buffer pointers */
+ if (pixels_current)
+ drm_gem_vram_unpin(pixels_current);
+ mdev->cursor.pixels_current = pixels_next;
- ttm_bo_kunmap(&pixels_prev->kmap);
- out3:
- ttm_bo_kunmap(&bo->kmap);
- out2:
- mgag200_bo_unreserve(bo);
- out1:
- if (ret)
- mga_hide_cursor(mdev);
- mgag200_bo_unreserve(pixels_1);
-out_unreserve1:
- mgag200_bo_unreserve(pixels_2);
-out_unref:
+ drm_gem_vram_kunmap(pixels_next);
+ drm_gem_vram_unpin(pixels_next);
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
drm_gem_object_put_unlocked(obj);
+ return 0;
+
+err_drm_gem_vram_kunmap_dst:
+ drm_gem_vram_kunmap(pixels_next);
+err_drm_gem_vram_unpin_dst:
+ drm_gem_vram_unpin(pixels_next);
+err_drm_gem_vram_kunmap_src:
+ drm_gem_vram_kunmap(gbo);
+err_drm_gem_vram_unpin_src:
+ drm_gem_vram_unpin(gbo);
+err_drm_gem_object_put_unlocked:
+ drm_gem_object_put_unlocked(obj);
return ret;
}
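
The cursor rework above is an instance of the access pattern that
drm_gem_vram_object is designed around: pin, kmap, use the mapping, then
kunmap and unpin, unwinding in reverse order on error. A condensed sketch
under that assumption, using only helpers that appear in the patch
(example_access_vram_bo() is a hypothetical name):

#include <linux/err.h>
#include <drm/drm_gem_vram_helper.h>

/* Sketch: touch the storage of a VRAM-backed GEM object. A pl_flag
 * of 0 pins the buffer wherever it currently resides.
 */
static int example_access_vram_bo(struct drm_gem_vram_object *gbo)
{
	void *vaddr;
	int ret;

	ret = drm_gem_vram_pin(gbo, 0);
	if (ret)
		return ret;

	vaddr = drm_gem_vram_kmap(gbo, true, NULL);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out_unpin;
	}

	/* ... read or write through vaddr ... */

	drm_gem_vram_kunmap(gbo);
out_unpin:
	drm_gem_vram_unpin(gbo);
	return ret;
}
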
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index efd2145bc0a2..aafa1cb31f50 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -56,13 +56,7 @@ static void mga_pci_remove(struct pci_dev *pdev)
static const struct file_operations mgag200_driver_fops = {
.owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = mgag200_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
- .read = drm_read,
+ DRM_VRAM_MM_FILE_OPERATIONS
};
static struct drm_driver driver = {
@@ -76,10 +70,7 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
-
- .gem_free_object_unlocked = mgag200_gem_free_object,
- .dumb_create = mgag200_dumb_create,
- .dumb_map_offset = mgag200_dumb_mmap_offset,
+ DRM_GEM_VRAM_DRIVER
};
static struct pci_driver mgag200_pci_driver = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 8506e6d62b63..c47671ce6c48 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2010 Matt Turner.
- * Copyright 2012 Red Hat
+ * Copyright 2012 Red Hat
*
* Authors: Matthew Garrett
* Matt Turner
@@ -14,13 +14,11 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_vram_helper.h>
+
+#include <drm/drm_vram_mm_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -114,7 +112,6 @@ struct mga_fbdev {
struct mga_framebuffer mfb;
void *sysram;
int size;
- struct ttm_bo_kmap_obj mapping;
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
};
@@ -156,13 +153,10 @@ struct mga_cursor {
If either of these is NULL, then don't do hardware cursors, and
fall back to software.
*/
- struct mgag200_bo *pixels_1;
- struct mgag200_bo *pixels_2;
- u64 pixels_1_gpu_addr, pixels_2_gpu_addr;
+ struct drm_gem_vram_object *pixels_1;
+ struct drm_gem_vram_object *pixels_2;
/* The currently displayed icon, this points to one of pixels_1, or pixels_2 */
- struct mgag200_bo *pixels_current;
- /* The previously displayed icon */
- struct mgag200_bo *pixels_prev;
+ struct drm_gem_vram_object *pixels_current;
};
struct mga_mc {
@@ -208,31 +202,10 @@ struct mga_device {
int fb_mtrr;
- struct {
- struct ttm_bo_device bdev;
- } ttm;
-
/* SE model number stored in reg 0x1e24 */
u32 unique_rev_id;
};
-
-struct mgag200_bo {
- struct ttm_buffer_object bo;
- struct ttm_placement placement;
- struct ttm_bo_kmap_obj kmap;
- struct drm_gem_object gem;
- struct ttm_place placements[3];
- int pin_count;
-};
-#define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
-
-static inline struct mgag200_bo *
-mgag200_bo(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct mgag200_bo, bo);
-}
-
/* mgag200_mode.c */
int mgag200_modeset_init(struct mga_device *mdev);
void mgag200_modeset_fini(struct mga_device *mdev);
@@ -256,45 +229,15 @@ int mgag200_gem_create(struct drm_device *dev,
int mgag200_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-void mgag200_gem_free_object(struct drm_gem_object *obj);
-int
-mgag200_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset);
+
/* mgag200_i2c.c */
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
-void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
-
-static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
-{
- int ret;
-
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
- if (ret) {
- if (ret != -ERESTARTSYS && ret != -EBUSY)
- DRM_ERROR("reserve failed %p\n", bo);
- return ret;
- }
- return 0;
-}
-
-static inline void mgag200_bo_unreserve(struct mgag200_bo *bo)
-{
- ttm_bo_unreserve(&bo->bo);
-}
-
-int mgag200_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct mgag200_bo **pastbo);
int mgag200_mm_init(struct mga_device *mdev);
void mgag200_mm_fini(struct mga_device *mdev);
-int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
-int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
-int mgag200_bo_unpin(struct mgag200_bo *bo);
-int mgag200_bo_push_sysram(struct mgag200_bo *bo);
- /* mgag200_cursor.c */
+
int mga_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index c4d1dcc5afde..8adb33228732 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -20,29 +20,31 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
{
int i;
struct drm_gem_object *obj;
- struct mgag200_bo *bo;
+ struct drm_gem_vram_object *gbo;
int src_offset, dst_offset;
int bpp = mfbdev->mfb.base.format->cpp[0];
- int ret = -EBUSY;
+ int ret;
+ u8 *dst;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
obj = mfbdev->mfb.obj;
- bo = gem_to_mga_bo(obj);
-
- /*
- * try and reserve the BO, if we fail with busy
- * then the BO is being moved and we should
- * store up the damage until later.
- */
- if (drm_can_sleep())
- ret = mgag200_bo_reserve(bo, true);
- if (ret) {
- if (ret != -EBUSY)
- return;
-
+ gbo = drm_gem_vram_of_gem(obj);
+
+ if (drm_can_sleep()) {
+ /* We pin the BO so it won't be moved during the
+ * update. The actual location, video RAM or system
+ * memory, is not important.
+ */
+ ret = drm_gem_vram_pin(gbo, 0);
+ if (ret) {
+ if (ret != -EBUSY)
+ return;
+ store_for_later = true;
+ }
+ } else {
store_for_later = true;
}
@@ -72,25 +74,32 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
mfbdev->x2 = mfbdev->y2 = 0;
spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
- if (!bo->kmap.virtual) {
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret) {
+ dst = drm_gem_vram_kmap(gbo, false, NULL);
+ if (IS_ERR(dst)) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ goto out;
+ } else if (!dst) {
+ dst = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(dst)) {
DRM_ERROR("failed to kmap fb updates\n");
- mgag200_bo_unreserve(bo);
- return;
+ goto out;
}
unmap = true;
}
+
for (i = y; i <= y2; i++) {
/* assume equal stride for now */
- src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
-
+ src_offset = dst_offset =
+ i * mfbdev->mfb.base.pitches[0] + (x * bpp);
+ memcpy_toio(dst + dst_offset, mfbdev->sysram + src_offset,
+ (x2 - x + 1) * bpp);
}
+
if (unmap)
- ttm_bo_kunmap(&bo->kmap);
+ drm_gem_vram_kunmap(gbo);
- mgag200_bo_unreserve(bo);
+out:
+ drm_gem_vram_unpin(gbo);
}
static void mga_fillrect(struct fb_info *info,
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 5333bd41a36f..dd61ccc5af5c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -227,19 +227,19 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
}
/* Make small buffers to store a hardware cursor (double buffered icon updates) */
- mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
- &mdev->cursor.pixels_1);
- mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
- &mdev->cursor.pixels_2);
- if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) {
+ mdev->cursor.pixels_1 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
+ roundup(48*64, PAGE_SIZE),
+ 0, 0);
+ mdev->cursor.pixels_2 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
+ roundup(48*64, PAGE_SIZE),
+ 0, 0);
+ if (IS_ERR(mdev->cursor.pixels_2) || IS_ERR(mdev->cursor.pixels_1)) {
mdev->cursor.pixels_1 = NULL;
mdev->cursor.pixels_2 = NULL;
dev_warn(&dev->pdev->dev,
"Could not allocate space for cursors. Not doing hardware cursors.\n");
- } else {
- mdev->cursor.pixels_current = mdev->cursor.pixels_1;
- mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
}
+ mdev->cursor.pixels_current = NULL;
return 0;
@@ -269,7 +269,7 @@ int mgag200_gem_create(struct drm_device *dev,
u32 size, bool iskernel,
struct drm_gem_object **obj)
{
- struct mgag200_bo *astbo;
+ struct drm_gem_vram_object *gbo;
int ret;
*obj = NULL;
@@ -278,78 +278,13 @@ int mgag200_gem_create(struct drm_device *dev,
if (size == 0)
return -EINVAL;
- ret = mgag200_bo_create(dev, size, 0, 0, &astbo);
- if (ret) {
+ gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
if (ret != -ERESTARTSYS)
DRM_ERROR("failed to allocate GEM object\n");
return ret;
}
- *obj = &astbo->gem;
- return 0;
-}
-
-int mgag200_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- int ret;
- struct drm_gem_object *gobj;
- u32 handle;
-
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
- ret = mgag200_gem_create(dev, args->size, false,
- &gobj);
- if (ret)
- return ret;
-
- ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_put_unlocked(gobj);
- if (ret)
- return ret;
-
- args->handle = handle;
- return 0;
-}
-
-static void mgag200_bo_unref(struct mgag200_bo **bo)
-{
- if ((*bo) == NULL)
- return;
- ttm_bo_put(&((*bo)->bo));
- *bo = NULL;
-}
-
-void mgag200_gem_free_object(struct drm_gem_object *obj)
-{
- struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
-
- mgag200_bo_unref(&mgag200_bo);
-}
-
-
-static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
-{
- return drm_vma_node_offset_addr(&bo->bo.vma_node);
-}
-
-int
-mgag200_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
-{
- struct drm_gem_object *obj;
- struct mgag200_bo *bo;
-
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL)
- return -ENOENT;
-
- bo = gem_to_mga_bo(obj);
- *offset = mgag200_bo_mmap_offset(bo);
-
- drm_gem_object_put_unlocked(obj);
+ *obj = &gbo->gem;
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 210a19354f1c..a25054015e8c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -855,8 +855,6 @@ static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
}
-
-/* ast is different - we will force move buffers out of VRAM */
static int mga_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
@@ -864,48 +862,51 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
struct mga_device *mdev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct mga_framebuffer *mga_fb;
- struct mgag200_bo *bo;
+ struct drm_gem_vram_object *gbo;
int ret;
- u64 gpu_addr;
+ s64 gpu_addr;
+ void *base;
- /* push the previous fb to system ram */
if (!atomic && fb) {
mga_fb = to_mga_framebuffer(fb);
obj = mga_fb->obj;
- bo = gem_to_mga_bo(obj);
- ret = mgag200_bo_reserve(bo, false);
- if (ret)
- return ret;
- mgag200_bo_push_sysram(bo);
- mgag200_bo_unreserve(bo);
+ gbo = drm_gem_vram_of_gem(obj);
+
+ /* unmap if console */
+ if (&mdev->mfbdev->mfb == mga_fb)
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
}
mga_fb = to_mga_framebuffer(crtc->primary->fb);
obj = mga_fb->obj;
- bo = gem_to_mga_bo(obj);
+ gbo = drm_gem_vram_of_gem(obj);
- ret = mgag200_bo_reserve(bo, false);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
return ret;
-
- ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- if (ret) {
- mgag200_bo_unreserve(bo);
- return ret;
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (gpu_addr < 0) {
+ ret = (int)gpu_addr;
+ goto err_drm_gem_vram_unpin;
}
if (&mdev->mfbdev->mfb == mga_fb) {
/* if pushing console in kmap it */
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret)
+ base = drm_gem_vram_kmap(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
DRM_ERROR("failed to kmap fbcon\n");
-
+ }
}
- mgag200_bo_unreserve(bo);
mga_set_start_address(crtc, (u32)gpu_addr);
return 0;
+
+err_drm_gem_vram_unpin:
+ drm_gem_vram_unpin(gbo);
+ return ret;
}
static int mga_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -1419,18 +1420,18 @@ static void mga_crtc_destroy(struct drm_crtc *crtc)
static void mga_crtc_disable(struct drm_crtc *crtc)
{
- int ret;
DRM_DEBUG_KMS("\n");
mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
+ struct mga_device *mdev = crtc->dev->dev_private;
struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->primary->fb);
struct drm_gem_object *obj = mga_fb->obj;
- struct mgag200_bo *bo = gem_to_mga_bo(obj);
- ret = mgag200_bo_reserve(bo, false);
- if (ret)
- return;
- mgag200_bo_push_sysram(bo);
- mgag200_bo_unreserve(bo);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(obj);
+
+ /* unmap if console */
+ if (&mdev->mfbdev->mfb == mga_fb)
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
}
crtc->primary->fb = NULL;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index bd42365a8aa8..59294c0fd24a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -26,167 +26,21 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include "mgag200_drv.h"
-static inline struct mga_device *
-mgag200_bdev(struct ttm_bo_device *bd)
-{
- return container_of(bd, struct mga_device, ttm.bdev);
-}
-
-static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
-{
- struct mgag200_bo *bo;
-
- bo = container_of(tbo, struct mgag200_bo, bo);
-
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
-{
- if (bo->destroy == &mgag200_bo_ttm_destroy)
- return true;
- return false;
-}
-
-static int
-mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
-}
-
-static void
-mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
-{
- struct mgag200_bo *mgabo = mgag200_bo(bo);
-
- if (!mgag200_ttm_bo_is_mgag200_bo(bo))
- return;
-
- mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
- *pl = mgabo->placement;
-}
-
-static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct mgag200_bo *mgabo = mgag200_bo(bo);
-
- return drm_vma_node_verify_access(&mgabo->gem.vma_node,
- filp->private_data);
-}
-
-static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct mga_device *mdev = mgag200_bdev(bdev);
-
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- /* system memory */
- return 0;
- case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
- mem->bus.is_iomem = true;
- break;
- default:
- return -EINVAL;
- break;
- }
- return 0;
-}
-
-static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
-{
-}
-
-static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
-{
- ttm_tt_fini(tt);
- kfree(tt);
-}
-
-static struct ttm_backend_func mgag200_tt_backend_func = {
- .destroy = &mgag200_ttm_backend_destroy,
-};
-
-
-static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct ttm_tt *tt;
-
- tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
- if (tt == NULL)
- return NULL;
- tt->func = &mgag200_tt_backend_func;
- if (ttm_tt_init(tt, bo, page_flags)) {
- kfree(tt);
- return NULL;
- }
- return tt;
-}
-
-struct ttm_bo_driver mgag200_bo_driver = {
- .ttm_tt_create = mgag200_ttm_tt_create,
- .init_mem_type = mgag200_bo_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = mgag200_bo_evict_flags,
- .move = NULL,
- .verify_access = mgag200_bo_verify_access,
- .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
- .io_mem_free = &mgag200_ttm_io_mem_free,
-};
-
int mgag200_mm_init(struct mga_device *mdev)
{
+ struct drm_vram_mm *vmm;
int ret;
struct drm_device *dev = mdev->dev;
- struct ttm_bo_device *bdev = &mdev->ttm.bdev;
-
- ret = ttm_bo_device_init(&mdev->ttm.bdev,
- &mgag200_bo_driver,
- dev->anon_inode->i_mapping,
- true);
- if (ret) {
- DRM_ERROR("Error initialising bo driver; %d\n", ret);
- return ret;
- }
- ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
- if (ret) {
- DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
+ mdev->mc.vram_size,
+ &drm_gem_vram_mm_funcs);
+ if (IS_ERR(vmm)) {
+ ret = PTR_ERR(vmm);
+ DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
return ret;
}
@@ -203,149 +57,10 @@ void mgag200_mm_fini(struct mga_device *mdev)
{
struct drm_device *dev = mdev->dev;
- ttm_bo_device_release(&mdev->ttm.bdev);
+ drm_vram_helper_release_mm(dev);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
-
-void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
-{
- u32 c = 0;
- unsigned i;
-
- bo->placement.placement = bo->placements;
- bo->placement.busy_placement = bo->placements;
- if (domain & TTM_PL_FLAG_VRAM)
- bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
- if (domain & TTM_PL_FLAG_SYSTEM)
- bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
- if (!c)
- bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
- bo->placement.num_placement = c;
- bo->placement.num_busy_placement = c;
- for (i = 0; i < c; ++i) {
- bo->placements[i].fpfn = 0;
- bo->placements[i].lpfn = 0;
- }
-}
-
-int mgag200_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct mgag200_bo **pmgabo)
-{
- struct mga_device *mdev = dev->dev_private;
- struct mgag200_bo *mgabo;
- size_t acc_size;
- int ret;
-
- mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
- if (!mgabo)
- return -ENOMEM;
-
- ret = drm_gem_object_init(dev, &mgabo->gem, size);
- if (ret) {
- kfree(mgabo);
- return ret;
- }
-
- mgabo->bo.bdev = &mdev->ttm.bdev;
-
- mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
-
- acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
- sizeof(struct mgag200_bo));
-
- ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
- ttm_bo_type_device, &mgabo->placement,
- align >> PAGE_SHIFT, false, acc_size,
- NULL, NULL, mgag200_bo_ttm_destroy);
- if (ret)
- return ret;
-
- *pmgabo = mgabo;
- return 0;
-}
-
-static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
-{
- return bo->bo.offset;
-}
-
-int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- if (bo->pin_count) {
- bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = mgag200_bo_gpu_offset(bo);
- return 0;
- }
-
- mgag200_ttm_placement(bo, pl_flag);
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret)
- return ret;
-
- bo->pin_count = 1;
- if (gpu_addr)
- *gpu_addr = mgag200_bo_gpu_offset(bo);
- return 0;
-}
-
-int mgag200_bo_unpin(struct mgag200_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i;
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
-
- for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
-}
-
-int mgag200_bo_push_sysram(struct mgag200_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
-
- if (bo->kmap.virtual)
- ttm_bo_kunmap(&bo->kmap);
-
- mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
- for (i = 0; i < bo->placement.num_placement ; i++)
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret) {
- DRM_ERROR("pushing to VRAM failed\n");
- return ret;
- }
- return 0;
-}
-
-int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct mga_device *mdev = file_priv->minor->dev->dev_private;
-
- return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
-}
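
Everything deleted above — the memory-type setup, the TTM backend
functions and the pin/unpin bookkeeping — collapses into two helper
calls. A sketch of the resulting init/teardown pairing, assuming the VRAM
aperture is PCI BAR 0 as in mgag200 (the example_mm_* names are
hypothetical):

#include <linux/err.h>
#include <linux/pci.h>
#include <drm/drm_device.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_vram_mm_helper.h>

/* Sketch: let the VRAM helpers manage the aperture. */
static int example_mm_init(struct drm_device *dev, size_t vram_size)
{
	struct drm_vram_mm *vmm;

	vmm = drm_vram_helper_alloc_mm(dev,
				       pci_resource_start(dev->pdev, 0),
				       vram_size, &drm_gem_vram_mm_funcs);
	return PTR_ERR_OR_ZERO(vmm);
}

static void example_mm_fini(struct drm_device *dev)
{
	drm_vram_helper_release_mm(dev);
}
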
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index dfdfa766da8f..3772f745589d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -694,14 +694,12 @@ end:
static void dpu_crtc_reset(struct drm_crtc *crtc)
{
- struct dpu_crtc_state *cstate;
+ struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
if (crtc->state)
dpu_crtc_destroy_state(crtc, crtc->state);
- crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
- if (crtc->state)
- crtc->state->crtc = crtc;
+ __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
}
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 0440696b5bad..2307c431a894 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -1032,10 +1032,11 @@ int dpu_format_check_modified_format(
const struct drm_mode_fb_cmd2 *cmd,
struct drm_gem_object **bos)
{
- int ret, i, num_base_fmt_planes;
+ const struct drm_format_info *info;
const struct dpu_format *fmt;
struct dpu_hw_fmt_layout layout;
uint32_t bos_total_size = 0;
+ int ret, i;
if (!msm_fmt || !cmd || !bos) {
DRM_ERROR("invalid arguments\n");
@@ -1043,14 +1044,16 @@ int dpu_format_check_modified_format(
}
fmt = to_dpu_format(msm_fmt);
- num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+ info = drm_format_info(fmt->base.pixel_format);
+ if (!info)
+ return -EINVAL;
ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
&layout, cmd->pitches);
if (ret)
return ret;
- for (i = 0; i < num_base_fmt_planes; i++) {
+ for (i = 0; i < info->num_planes; i++) {
if (!bos[i]) {
DRM_ERROR("invalid handle for plane %d\n", i);
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index ce1a555e1f31..d831cedb55ec 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -557,14 +557,9 @@ static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
const struct dpu_format *fmt, bool color_fill)
{
- uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+ const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
/* don't chroma subsample if decimating */
- chroma_subsmpl_h =
- drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
- chroma_subsmpl_v =
- drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
-
/* update scaler. calculate default config for QSEED3 */
_dpu_plane_setup_scaler3(pdpu, pstate,
drm_rect_width(&pdpu->pipe_cfg.src_rect),
@@ -572,7 +567,7 @@ static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
drm_rect_width(&pdpu->pipe_cfg.dst_rect),
drm_rect_height(&pdpu->pipe_cfg.dst_rect),
&pstate->scaler3_cfg, fmt,
- chroma_subsmpl_h, chroma_subsmpl_v);
+ info->hsub, info->vsub);
}
/**
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index b0cf63c4e3d7..c3751c95b452 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -782,6 +782,7 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
@@ -800,7 +801,7 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
width = mdp5_crtc->cursor.width;
height = mdp5_crtc->cursor.height;
- stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
+ stride = width * info->cpp[0];
get_roi(crtc, &roi_w, &roi_h);
@@ -1002,23 +1003,6 @@ mdp5_crtc_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}
-static void mdp5_crtc_reset(struct drm_crtc *crtc)
-{
- struct mdp5_crtc_state *mdp5_cstate;
-
- if (crtc->state) {
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
- kfree(to_mdp5_crtc_state(crtc->state));
- }
-
- mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
-
- if (mdp5_cstate) {
- mdp5_cstate->base.crtc = crtc;
- crtc->state = &mdp5_cstate->base;
- }
-}
-
static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
@@ -1046,6 +1030,17 @@ static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state
kfree(mdp5_cstate);
}
+static void mdp5_crtc_reset(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
+
+ if (crtc->state)
+ mdp5_crtc_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+}
+
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
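
dpu above, mdp5 here and nouveau below all converge on the same .reset
idiom for subclassed CRTC state: destroy whatever state is installed, then
hand a freshly zeroed subclass to __drm_atomic_helper_crtc_reset(). A
generic sketch with hypothetical example_* names; passing NULL on
allocation failure is tolerated by the helper, which simply leaves
crtc->state NULL:

#include <linux/slab.h>
#include <drm/drm_crtc.h>
#include <drm/drm_atomic_state_helper.h>

struct example_crtc_state {
	struct drm_crtc_state base;
	/* driver-private members ... */
};

/* Sketch: the driver's existing .atomic_destroy_state hook. */
static void example_crtc_destroy_state(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(container_of(state, struct example_crtc_state, base));
}

/* Sketch: the .reset hook shared by the conversions in this series. */
static void example_crtc_reset(struct drm_crtc *crtc)
{
	struct example_crtc_state *state =
		kzalloc(sizeof(*state), GFP_KERNEL);

	if (crtc->state)
		example_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, state ? &state->base : NULL);
}
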
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index b854f471e9e5..1105c2433f14 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -654,10 +654,10 @@ static int calc_scalex_steps(struct drm_plane *plane,
uint32_t pixel_format, uint32_t src, uint32_t dest,
uint32_t phasex_steps[COMP_MAX])
{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct device *dev = mdp5_kms->dev->dev;
uint32_t phasex_step;
- unsigned int hsub;
int ret;
ret = calc_phase_step(src, dest, &phasex_step);
@@ -666,11 +666,9 @@ static int calc_scalex_steps(struct drm_plane *plane,
return ret;
}
- hsub = drm_format_horz_chroma_subsampling(pixel_format);
-
phasex_steps[COMP_0] = phasex_step;
phasex_steps[COMP_3] = phasex_step;
- phasex_steps[COMP_1_2] = phasex_step / hsub;
+ phasex_steps[COMP_1_2] = phasex_step / info->hsub;
return 0;
}
@@ -679,10 +677,10 @@ static int calc_scaley_steps(struct drm_plane *plane,
uint32_t pixel_format, uint32_t src, uint32_t dest,
uint32_t phasey_steps[COMP_MAX])
{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct device *dev = mdp5_kms->dev->dev;
uint32_t phasey_step;
- unsigned int vsub;
int ret;
ret = calc_phase_step(src, dest, &phasey_step);
@@ -691,11 +689,9 @@ static int calc_scaley_steps(struct drm_plane *plane,
return ret;
}
- vsub = drm_format_vert_chroma_subsampling(pixel_format);
-
phasey_steps[COMP_0] = phasey_step;
phasey_steps[COMP_3] = phasey_step;
- phasey_steps[COMP_1_2] = phasey_step / vsub;
+ phasey_steps[COMP_1_2] = phasey_step / info->vsub;
return 0;
}
@@ -703,8 +699,9 @@ static int calc_scaley_steps(struct drm_plane *plane,
static uint32_t get_scale_config(const struct mdp_format *format,
uint32_t src, uint32_t dst, bool horz)
{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
bool scaling = format->is_yuv ? true : (src != dst);
- uint32_t sub, pix_fmt = format->base.pixel_format;
+ uint32_t sub;
uint32_t ya_filter, uv_filter;
bool yuv = format->is_yuv;
@@ -712,8 +709,7 @@ static uint32_t get_scale_config(const struct mdp_format *format,
return 0;
if (yuv) {
- sub = horz ? drm_format_horz_chroma_subsampling(pix_fmt) :
- drm_format_vert_chroma_subsampling(pix_fmt);
+ sub = horz ? info->hsub : info->vsub;
uv_filter = ((src / sub) <= dst) ?
SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
}
@@ -758,7 +754,7 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
{
- uint32_t pix_fmt = format->base.pixel_format;
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
uint32_t lr, tb, req;
int i;
@@ -767,8 +763,8 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
uint32_t roi_h = src_h;
if (format->is_yuv && i == COMP_1_2) {
- roi_w /= drm_format_horz_chroma_subsampling(pix_fmt);
- roi_h /= drm_format_vert_chroma_subsampling(pix_fmt);
+ roi_w /= info->hsub;
+ roi_h /= info->vsub;
}
lr = (pe_left[i] >= 0) ?
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 6153514db04c..2834837f4d3e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -127,14 +127,15 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
const struct mdp_format *format,
u32 width, bool hdecim)
{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
struct mdp5_kms *mdp5_kms = get_kms(smp);
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines;
u32 fmt = format->base.pixel_format;
uint32_t blkcfg = 0;
- nplanes = drm_format_num_planes(fmt);
- hsub = drm_format_horz_chroma_subsampling(fmt);
+ nplanes = info->num_planes;
+ hsub = info->hsub;
/* different if BWC (compressed framebuffer?) enabled: */
nlines = 2;
@@ -157,7 +158,7 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
for (i = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
- cpp = drm_format_plane_cpp(fmt, i);
+ cpp = info->cpp[i];
fetch_stride = width * cpp / (i ? hsub : 1);
n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 136058978e0f..68fa2c8f61e6 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -106,9 +106,11 @@ const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info = drm_get_format_info(dev,
+ mode_cmd);
struct drm_gem_object *bos[4] = {0};
struct drm_framebuffer *fb;
- int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+ int ret, i, n = info->num_planes;
for (i = 0; i < n; i++) {
bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
@@ -135,22 +137,20 @@ out_unref:
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
+ const struct drm_format_info *info = drm_get_format_info(dev,
+ mode_cmd);
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_framebuffer *msm_fb = NULL;
struct drm_framebuffer *fb;
const struct msm_format *format;
int ret, i, n;
- unsigned int hsub, vsub;
DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
- n = drm_format_num_planes(mode_cmd->pixel_format);
- hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
-
+ n = info->num_planes;
format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (!format) {
@@ -176,12 +176,12 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
}
for (i = 0; i < n; i++) {
- unsigned int width = mode_cmd->width / (i ? hsub : 1);
- unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
unsigned int min_size;
min_size = (height - 1) * mode_cmd->pitches[i]
- + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + width * info->cpp[i]
+ mode_cmd->offsets[i];
if (bos[i]->size < min_size) {
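
msm generalizes the single-plane size check shown earlier for mediatek:
planes after the first are scaled down by the format's chroma subsampling
factors before the per-plane minimum is computed. A sketch, with
fb_plane_min_size() as a hypothetical helper name:

#include <linux/types.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>

/* Sketch: smallest buffer that can back plane @i of @cmd. */
static u32 fb_plane_min_size(const struct drm_format_info *info,
			     const struct drm_mode_fb_cmd2 *cmd,
			     unsigned int i)
{
	unsigned int width = cmd->width / (i ? info->hsub : 1);
	unsigned int height = cmd->height / (i ? info->vsub : 1);

	return (height - 1) * cmd->pitches[i] +
	       width * info->cpp[i] + cmd->offsets[i];
}
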
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 4b1650f51955..7ba373f493b2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -948,11 +948,12 @@ nv50_mstc_get_modes(struct drm_connector *connector)
static int
nv50_mstc_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *new_conn_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = new_conn_state->state;
struct nv50_mstc *mstc = nv50_mstc(connector);
struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(state, connector);
struct drm_crtc_state *crtc_state;
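/*
 * Illustrative sketch (hypothetical driver, not from this patch): with the
 * new prototype a connector ->atomic_check() receives the whole
 * drm_atomic_state and looks up its own old and new connector state,
 * instead of being handed the new state directly.
 */
static int foo_connector_atomic_check(struct drm_connector *connector,
				      struct drm_atomic_state *state)
{
	struct drm_connector_state *new_state =
		drm_atomic_get_new_connector_state(state, connector);
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, connector);

	/* compare old_state and new_state, add affected CRTC states here */
	return 0;
}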
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 06ee23823a68..48a6485ec4e0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -421,16 +421,6 @@ nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
}
static void
-__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- if (crtc->state)
- crtc->funcs->atomic_destroy_state(crtc, crtc->state);
- crtc->state = state;
- crtc->state->crtc = crtc;
-}
-
-static void
nv50_head_reset(struct drm_crtc *crtc)
{
struct nv50_head_atom *asyh;
@@ -438,6 +428,9 @@ nv50_head_reset(struct drm_crtc *crtc)
if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
return;
+ if (crtc->state)
+ nv50_head_atomic_destroy_state(crtc, crtc->state);
+
__drm_atomic_helper_crtc_reset(crtc, &asyh->state);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
index c80b96789c31..2b44ba5cf4b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
@@ -26,8 +26,6 @@
#include <subdev/gpio.h>
-#include <subdev/gpio.h>
-
static void
nv04_bus_intr(struct nvkm_bus *bus)
{
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 741a5e324767..913e8291a917 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -410,8 +410,7 @@ static const struct backlight_ops dsicm_bl_ops = {
static ssize_t dsicm_num_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
struct omap_dss_device *src = ddata->src;
u8 errors = 0;
int r;
@@ -442,8 +441,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev,
static ssize_t dsicm_hw_revision_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
struct omap_dss_device *src = ddata->src;
u8 id1, id2, id3;
int r;
@@ -474,8 +472,7 @@ static ssize_t dsicm_store_ulps(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
struct omap_dss_device *src = ddata->src;
unsigned long t;
int r;
@@ -509,8 +506,7 @@ static ssize_t dsicm_show_ulps(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
unsigned int t;
mutex_lock(&ddata->lock);
@@ -524,8 +520,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
struct omap_dss_device *src = ddata->src;
unsigned long t;
int r;
@@ -556,8 +551,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
+ struct panel_drv_data *ddata = dev_get_drvdata(dev);
unsigned int t;
mutex_lock(&ddata->lock);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 2b41c75ce988..e02aa8e70968 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -198,6 +198,7 @@ static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
{ .compatible = "toppoly,td028ttec1" },
{ .compatible = "tpo,td028ttec1" },
{ .compatible = "tpo,td043mtea1" },
+ {},
};
static int __init omapdss_boot_init(void)
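/*
 * Illustrative note (generic example, not from this patch): of_device_id
 * tables are walked until an entry with an empty compatible string, so
 * the "{}," terminator added above is what stops the scan; without it the
 * matcher reads past the end of the array. Minimal shape of such a table:
 */
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,model" },
	{ /* sentinel */ },
};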
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 5a29bf01c0e8..d61215494617 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -32,6 +32,7 @@ struct omap_crtc_state {
/* Shadow values for legacy userspace support. */
unsigned int rotation;
unsigned int zpos;
+ bool manually_updated;
};
#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
@@ -51,6 +52,10 @@ struct omap_crtc {
bool pending;
wait_queue_head_t pending_wait;
struct drm_pending_vblank_event *event;
+ struct delayed_work update_work;
+
+ void (*framedone_handler)(void *);
+ void *framedone_handler_data;
};
/* -----------------------------------------------------------------------------
@@ -102,21 +107,18 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
/*
* Manager-ops, callbacks from output when they need to configure
* the upstream part of the video pipe.
- *
- * Most of these we can ignore until we add support for command-mode
- * panels.. for video-mode the crtc-helpers already do an adequate
- * job of sequencing the setup of the video pipe in the proper order
*/
-/* we can probably ignore these until we support command-mode panels: */
static void omap_crtc_dss_start_update(struct omap_drm_private *priv,
enum omap_channel channel)
{
+ priv->dispc_ops->mgr_enable(priv->dispc, channel, true);
}
/* Called only from the encoder enable/disable and suspend/resume handlers. */
static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
{
+ struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
struct omap_drm_private *priv = dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -128,6 +130,12 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
if (WARN_ON(omap_crtc->enabled == enable))
return;
+ if (omap_state->manually_updated) {
+ omap_irq_enable_framedone(crtc, enable);
+ omap_crtc->enabled = enable;
+ return;
+ }
+
if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) {
priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
@@ -230,6 +238,18 @@ static int omap_crtc_dss_register_framedone(
struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
+ struct drm_crtc *crtc = priv->channels[channel]->crtc;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_device *dev = omap_crtc->base.dev;
+
+ if (omap_crtc->framedone_handler)
+ return -EBUSY;
+
+ dev_dbg(dev->dev, "register framedone %s", omap_crtc->name);
+
+ omap_crtc->framedone_handler = handler;
+ omap_crtc->framedone_handler_data = data;
+
return 0;
}
@@ -237,6 +257,17 @@ static void omap_crtc_dss_unregister_framedone(
struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
+ struct drm_crtc *crtc = priv->channels[channel]->crtc;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_device *dev = omap_crtc->base.dev;
+
+ dev_dbg(dev->dev, "unregister framedone %s", omap_crtc->name);
+
+ WARN_ON(omap_crtc->framedone_handler != handler);
+ WARN_ON(omap_crtc->framedone_handler_data != data);
+
+ omap_crtc->framedone_handler = NULL;
+ omap_crtc->framedone_handler_data = NULL;
}
static const struct dss_mgr_ops mgr_ops = {
@@ -302,6 +333,73 @@ void omap_crtc_vblank_irq(struct drm_crtc *crtc)
DBG("%s: apply done", omap_crtc->name);
}
+void omap_crtc_framedone_irq(struct drm_crtc *crtc, uint32_t irqstatus)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ if (!omap_crtc->framedone_handler)
+ return;
+
+ omap_crtc->framedone_handler(omap_crtc->framedone_handler_data);
+
+ spin_lock(&crtc->dev->event_lock);
+ /* Send the vblank event if one has been requested. */
+ if (omap_crtc->event) {
+ drm_crtc_send_vblank_event(crtc, omap_crtc->event);
+ omap_crtc->event = NULL;
+ }
+ omap_crtc->pending = false;
+ spin_unlock(&crtc->dev->event_lock);
+
+ /* Wake up omap_atomic_complete. */
+ wake_up(&omap_crtc->pending_wait);
+}
+
+void omap_crtc_flush(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state);
+
+ if (!omap_state->manually_updated)
+ return;
+
+ if (!delayed_work_pending(&omap_crtc->update_work))
+ schedule_delayed_work(&omap_crtc->update_work, 0);
+}
+
+static void omap_crtc_manual_display_update(struct work_struct *data)
+{
+ struct omap_crtc *omap_crtc =
+ container_of(data, struct omap_crtc, update_work.work);
+ struct drm_display_mode *mode = &omap_crtc->pipe->crtc->mode;
+ struct omap_dss_device *dssdev = omap_crtc->pipe->output->next;
+ struct drm_device *dev = omap_crtc->base.dev;
+ const struct omap_dss_driver *dssdrv;
+ int ret;
+
+ if (!dssdev) {
+ dev_err_once(dev->dev, "missing display dssdev!");
+ return;
+ }
+
+ dssdrv = dssdev->driver;
+ if (!dssdrv || !dssdrv->update) {
+ dev_err_once(dev->dev, "missing or incorrect dssdrv!");
+ return;
+ }
+
+ if (dssdrv->sync)
+ dssdrv->sync(dssdev);
+
+ ret = dssdrv->update(dssdev, 0, 0, mode->hdisplay, mode->vdisplay);
+ if (ret < 0) {
+ spin_lock_irq(&dev->event_lock);
+ omap_crtc->pending = false;
+ spin_unlock_irq(&dev->event_lock);
+ wake_up(&omap_crtc->pending_wait);
+ }
+}
+
static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
@@ -351,12 +449,17 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state);
int ret;
DBG("%s", omap_crtc->name);
priv->dispc_ops->runtime_get(priv->dispc);
+ /* a manually updated display does not generate vsync IRQs */
+ if (omap_state->manually_updated)
+ return;
+
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_vblank_on(crtc);
ret = drm_crtc_vblank_get(crtc);
@@ -371,6 +474,7 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
DBG("%s", omap_crtc->name);
@@ -381,6 +485,11 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
}
spin_unlock_irq(&crtc->dev->event_lock);
+ cancel_delayed_work(&omap_crtc->update_work);
+
+ if (!omap_crtc_wait_pending(crtc))
+ dev_warn(dev->dev, "manual display update did not finish!\n");
+
drm_crtc_vblank_off(crtc);
priv->dispc_ops->runtime_put(priv->dispc);
@@ -395,10 +504,20 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
int r;
drm_display_mode_to_videomode(mode, &vm);
- r = priv->dispc_ops->mgr_check_timings(priv->dispc, omap_crtc->channel,
- &vm);
- if (r)
- return r;
+
+ /*
+ * DSI might not call this, since the supplied mode is not a
+ * valid DISPC mode. DSI will calculate and configure the
+ * proper DISPC mode later.
+ */
+ if (omap_crtc->pipe->output->next == NULL ||
+ omap_crtc->pipe->output->next->type != OMAP_DISPLAY_TYPE_DSI) {
+ r = priv->dispc_ops->mgr_check_timings(priv->dispc,
+ omap_crtc->channel,
+ &vm);
+ if (r)
+ return r;
+ }
/* Check for bandwidth limit */
if (priv->max_bandwidth) {
@@ -441,6 +560,22 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
drm_display_mode_to_videomode(mode, &omap_crtc->vm);
}
+static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_dss_device *display = omap_crtc->pipe->output->next;
+
+ if (!display)
+ return false;
+
+ if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+ DBG("detected manually updated display!");
+ return true;
+ }
+
+ return false;
+}
+
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -462,6 +597,9 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
/* Mirror new values for zpos and rotation in omap_crtc_state */
omap_crtc_state->zpos = pri_state->zpos;
omap_crtc_state->rotation = pri_state->rotation;
+
+ /* Check if this CRTC is for a manually updated display */
+ omap_crtc_state->manually_updated = omap_crtc_is_manually_updated(crtc);
}
return 0;
@@ -477,6 +615,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_crtc_state *omap_crtc_state = to_omap_crtc_state(crtc->state);
int ret;
if (crtc->state->color_mgmt_changed) {
@@ -501,6 +640,15 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
DBG("%s: GO", omap_crtc->name);
+ if (omap_crtc_state->manually_updated) {
+ /* send new image for page flips and modeset changes */
+ spin_lock_irq(&crtc->dev->event_lock);
+ omap_crtc_flush(crtc);
+ omap_crtc_arm_event(crtc);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ return;
+ }
+
ret = drm_crtc_vblank_get(crtc);
WARN_ON(ret != 0);
@@ -586,6 +734,7 @@ omap_crtc_duplicate_state(struct drm_crtc *crtc)
state->zpos = current_state->zpos;
state->rotation = current_state->rotation;
+ state->manually_updated = current_state->manually_updated;
return &state->base;
}
@@ -662,6 +811,19 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_crtc->channel = channel;
omap_crtc->name = channel_names[channel];
+ /*
+ * Manually updated displays are refreshed from the dirty callback, which
+ * may fire very often (e.g. once per drawn line). The delayed work does
+ * the display update asynchronously, so rendering is never blocked, and
+ * it merges a burst of dirty calls into a single update. The same work
+ * is also used for atomic display updates (e.g. for page flips), so no
+ * extra locking is needed. Atomic updates are expected to be synchronous,
+ * but have to wait for the framedone interrupt anyway.
+ */
+ INIT_DELAYED_WORK(&omap_crtc->update_work,
+ omap_crtc_manual_display_update);
+
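/*
 * Illustrative sketch of the coalescing behaviour described in the
 * comment above (hypothetical helper, not part of the patch): scheduling
 * with a zero delay runs the update as soon as possible, while the
 * pending check makes a burst of dirty calls collapse into the single
 * update that is already queued.
 */
static void foo_request_manual_update(struct delayed_work *update_work)
{
	if (!delayed_work_pending(update_work))
		schedule_delayed_work(update_work, 0);
}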
ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
&omap_crtc_funcs, NULL);
if (ret < 0) {
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
index d9de437ba9dd..2b518c74203e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.h
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -41,5 +41,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
int omap_crtc_wait_pending(struct drm_crtc *crtc);
void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus);
void omap_crtc_vblank_irq(struct drm_crtc *crtc);
+void omap_crtc_framedone_irq(struct drm_crtc *crtc, uint32_t irqstatus);
+void omap_crtc_flush(struct drm_crtc *crtc);
#endif /* __OMAPDRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 1b9b6f5e48e1..672e0f8ad11c 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -439,20 +439,6 @@ static int ioctl_get_param(struct drm_device *dev, void *data,
return 0;
}
-static int ioctl_set_param(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_omap_param *args = data;
-
- switch (args->param) {
- default:
- DBG("unknown parameter %lld", args->param);
- return -EINVAL;
- }
-
- return 0;
-}
-
#define OMAP_BO_USER_MASK 0x00ffffff /* flags settable by userspace */
static int ioctl_gem_new(struct drm_device *dev, void *data,
@@ -492,7 +478,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param,
DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param,
+ DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, drm_invalid_op,
DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new,
DRM_AUTH | DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 3cca45cb25f3..896aa12f09b2 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -37,8 +37,8 @@
#include "omap_irq.h"
#include "omap_plane.h"
-#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
-#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
+#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* verbose debug */
#define MODULE_NAME "omapdrm"
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 4f8eb9d08f99..06d5c5081e41 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -66,8 +66,27 @@ struct omap_framebuffer {
struct mutex lock;
};
+static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock_all(fb->dev);
+
+ drm_for_each_crtc(crtc, fb->dev)
+ omap_crtc_flush(crtc);
+
+ drm_modeset_unlock_all(fb->dev);
+
+ return 0;
+}
+
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.create_handle = drm_gem_fb_create_handle,
+ .dirty = omap_framebuffer_dirty,
.destroy = drm_gem_fb_destroy,
};
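/*
 * Illustrative userspace counterpart (libdrm; fd, fb_id and the clip
 * bounds are hypothetical): a DIRTYFB ioctl is what lands in the .dirty
 * hook wired up above and, via omap_crtc_flush(), triggers the manual
 * display update.
 */
#include <xf86drmMode.h>

static int flush_whole_fb(int fd, uint32_t fb_id)
{
	drmModeClip clip = { .x1 = 0, .y1 = 0, .x2 = 800, .y2 = 480 };

	return drmModeDirtyFB(fd, fb_id, &clip, 1);
}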
@@ -298,7 +317,9 @@ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
{
- unsigned int num_planes = drm_format_num_planes(mode_cmd->pixel_format);
+ const struct drm_format_info *info = drm_get_format_info(dev,
+ mode_cmd);
+ unsigned int num_planes = info->num_planes;
struct drm_gem_object *bos[4];
struct drm_framebuffer *fb;
int i;
@@ -337,7 +358,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
- format = drm_format_info(mode_cmd->pixel_format);
+ format = drm_get_format_info(dev, mode_cmd);
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i] == mode_cmd->pixel_format)
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 329ad26d6d50..01dda84ca2ee 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -85,6 +85,28 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
return ret == 0 ? -1 : 0;
}
+int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+ enum omap_channel channel = omap_crtc_channel(crtc);
+ int framedone_irq =
+ priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel);
+
+ DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);
+
+ spin_lock_irqsave(&priv->wait_lock, flags);
+ if (enable)
+ priv->irq_mask |= framedone_irq;
+ else
+ priv->irq_mask &= ~framedone_irq;
+ omap_irq_update(dev);
+ spin_unlock_irqrestore(&priv->wait_lock, flags);
+
+ return 0;
+}
+
/**
* enable_vblank - enable vblank interrupt events
* @dev: DRM device
@@ -217,6 +239,9 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, channel))
omap_crtc_error_irq(crtc, irqstatus);
+
+ if (irqstatus & priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel))
+ omap_crtc_framedone_irq(crtc, irqstatus);
}
omap_irq_ocp_error_handler(dev, irqstatus);
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.h b/drivers/gpu/drm/omapdrm/omap_irq.h
index 9d5441468eca..02abb4ed9813 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.h
+++ b/drivers/gpu/drm/omapdrm/omap_irq.h
@@ -27,6 +27,7 @@ struct drm_device;
struct omap_irq_wait;
int omap_irq_enable_vblank(struct drm_crtc *crtc);
+int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable);
void omap_irq_disable_vblank(struct drm_crtc *crtc);
void omap_drm_irq_uninstall(struct drm_device *dev);
int omap_drm_irq_install(struct drm_device *dev);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index e281fc544742..d9d931aa6e26 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -132,6 +132,15 @@ config DRM_PANEL_ORISETECH_OTM8009A
Say Y here if you want to enable support for Orise Technology
otm8009a 480x800 dsi 2dl panel.
+config DRM_PANEL_OSD_OSD101T2587_53TS
+ tristate "OSD OSD101T2587-53TS DSI 1920x1200 video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for One Stop Displays
+ OSD101T2587-53TS 10.1" 1920x1200 dsi panel.
+
config DRM_PANEL_PANASONIC_VVX10F034N00
tristate "Panasonic VVX10F034N00 1920x1200 video mode panel"
depends on OF
@@ -201,6 +210,15 @@ config DRM_PANEL_SAMSUNG_S6E63J0X03
depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6E63M0
+ tristate "Samsung S6E63M0 RGB/SPI panel"
+ depends on OF
+ depends on SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Samsung S6E63M0
+ AMOLED LCD panel.
+
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 78e3dc376bdd..fb0cb3aaa9e6 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
+obj-$(CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS) += panel-osd-osd101t2587-53ts.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
@@ -20,6 +21,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index a79908dfa3c8..5f72c922a04b 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -25,13 +25,12 @@
* Epson QCIF display.
*
*/
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -39,6 +38,9 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
/*
* This configuration register in the Versatile and RealView
* family is uniformly present but appears more and more
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index a1c4cd2940fb..35a4bd05edf5 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -22,13 +22,10 @@
* published by the Free Software Foundation.
*/
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
-
-#include <linux/of_device.h>
#include <linux/bitops.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -37,6 +34,10 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
#define ILI9322_CHIP_ID 0x00
#define ILI9322_CHIP_ID_MAGIC 0x96
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 0daafda39df7..d92d1c98878c 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -4,18 +4,21 @@
*/
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
+#include <video/mipi_display.h>
+
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-
-#include <video/mipi_display.h>
+#include <drm/drm_print.h>
struct panel_init_cmd {
size_t len;
@@ -51,7 +54,6 @@ struct innolux_panel {
struct backlight_device *backlight;
struct regulator_bulk_data *supplies;
- unsigned int num_supplies;
struct gpio_desc *enable_gpio;
bool prepared;
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 99caa7835e7b..ee5ddf771e10 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -21,19 +21,21 @@
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
+#include <video/mipi_display.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <video/mipi_display.h>
-
static const char * const regulator_names[] = {
"vddp",
"iovcc"
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 2a25a914d09e..3ac04eb8d0fe 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -4,17 +4,20 @@
*/
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
+#include <video/mipi_display.h>
+
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-
-#include <video/mipi_display.h>
+#include <drm/drm_print.h>
struct kingdisplay_panel {
struct drm_panel base;
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index 6989238b276a..0dd4bdda7c4e 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -15,10 +15,9 @@
* published by the Free Software Foundation.
*/
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
-
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -26,6 +25,10 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_device.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
struct lg4573 {
struct drm_panel panel;
struct spi_device *spi;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 3f6550e6b6a4..1ec57d0806a8 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -16,14 +16,13 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_panel.h>
-
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
struct panel_lvds {
struct drm_panel panel;
struct device *dev;
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index a1d8d92fac2b..2bae1db3ff34 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -15,13 +15,13 @@
#include <linux/of.h>
#include <linux/regulator/consumer.h>
-#include <drm/drm_modes.h>
-#include <drm/drm_panel.h>
-#include <drm/drmP.h>
-
#include <video/videomode.h>
#include <video/display_timing.h>
+#include <drm/drm_device.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
#define LCD_OLINUXINO_HEADER_MAGIC 0x4F4CB727
#define LCD_OLINUXINO_DATA_LEN 256
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index f27a7e426574..c7b48df8869a 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -6,14 +6,19 @@
* Yannick Fertre <yannick.fertre@st.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
#include <linux/regulator/consumer.h>
+
#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
#define OTM8009A_BACKLIGHT_DEFAULT 240
#define OTM8009A_BACKLIGHT_MAX 255
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
new file mode 100644
index 000000000000..e0e20ecff916
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct osd101t2587_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ struct backlight_device *backlight;
+ struct regulator *supply;
+
+ bool prepared;
+ bool enabled;
+
+ const struct drm_display_mode *default_mode;
+};
+
+static inline struct osd101t2587_panel *ti_osd_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct osd101t2587_panel, base);
+}
+
+static int osd101t2587_panel_disable(struct drm_panel *panel)
+{
+ struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
+ int ret;
+
+ if (!osd101t2587->enabled)
+ return 0;
+
+ backlight_disable(osd101t2587->backlight);
+
+ ret = mipi_dsi_shutdown_peripheral(osd101t2587->dsi);
+
+ osd101t2587->enabled = false;
+
+ return ret;
+}
+
+static int osd101t2587_panel_unprepare(struct drm_panel *panel)
+{
+ struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
+
+ if (!osd101t2587->prepared)
+ return 0;
+
+ regulator_disable(osd101t2587->supply);
+ osd101t2587->prepared = false;
+
+ return 0;
+}
+
+static int osd101t2587_panel_prepare(struct drm_panel *panel)
+{
+ struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
+ int ret;
+
+ if (osd101t2587->prepared)
+ return 0;
+
+ ret = regulator_enable(osd101t2587->supply);
+ if (!ret)
+ osd101t2587->prepared = true;
+
+ return ret;
+}
+
+static int osd101t2587_panel_enable(struct drm_panel *panel)
+{
+ struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
+ int ret;
+
+ if (osd101t2587->enabled)
+ return 0;
+
+ ret = mipi_dsi_turn_on_peripheral(osd101t2587->dsi);
+ if (ret)
+ return ret;
+
+ backlight_enable(osd101t2587->backlight);
+
+ osd101t2587->enabled = true;
+
+ return ret;
+}
+
+static const struct drm_display_mode default_mode_osd101t2587 = {
+ .clock = 164400,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 152,
+ .hsync_end = 1920 + 152 + 52,
+ .htotal = 1920 + 152 + 52 + 20,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 24,
+ .vsync_end = 1200 + 24 + 6,
+ .vtotal = 1200 + 24 + 6 + 48,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static int osd101t2587_panel_get_modes(struct drm_panel *panel)
+{
+ struct osd101t2587_panel *osd101t2587 = ti_osd_panel(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, osd101t2587->default_mode);
+ if (!mode) {
+ dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+ osd101t2587->default_mode->hdisplay,
+ osd101t2587->default_mode->vdisplay,
+ osd101t2587->default_mode->vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = 217;
+ panel->connector->display_info.height_mm = 136;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs osd101t2587_panel_funcs = {
+ .disable = osd101t2587_panel_disable,
+ .unprepare = osd101t2587_panel_unprepare,
+ .prepare = osd101t2587_panel_prepare,
+ .enable = osd101t2587_panel_enable,
+ .get_modes = osd101t2587_panel_get_modes,
+};
+
+static const struct of_device_id osd101t2587_of_match[] = {
+ {
+ .compatible = "osddisplays,osd101t2587-53ts",
+ .data = &default_mode_osd101t2587,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, osd101t2587_of_match);
+
+static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
+{
+ struct device *dev = &osd101t2587->dsi->dev;
+
+ osd101t2587->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(osd101t2587->supply))
+ return PTR_ERR(osd101t2587->supply);
+
+ osd101t2587->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(osd101t2587->backlight))
+ return PTR_ERR(osd101t2587->backlight);
+
+ drm_panel_init(&osd101t2587->base);
+ osd101t2587->base.funcs = &osd101t2587_panel_funcs;
+ osd101t2587->base.dev = &osd101t2587->dsi->dev;
+
+ return drm_panel_add(&osd101t2587->base);
+}
+
+static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct osd101t2587_panel *osd101t2587;
+ const struct of_device_id *id;
+ int ret;
+
+ id = of_match_node(osd101t2587_of_match, dsi->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_EOT_PACKET;
+
+ osd101t2587 = devm_kzalloc(&dsi->dev, sizeof(*osd101t2587), GFP_KERNEL);
+ if (!osd101t2587)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, osd101t2587);
+
+ osd101t2587->dsi = dsi;
+ osd101t2587->default_mode = id->data;
+
+ ret = osd101t2587_panel_add(osd101t2587);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ drm_panel_remove(&osd101t2587->base);
+
+ return ret;
+}
+
+static int osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = osd101t2587_panel_disable(&osd101t2587->base);
+ if (ret < 0)
+ dev_warn(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+ osd101t2587_panel_unprepare(&osd101t2587->base);
+
+ drm_panel_remove(&osd101t2587->base);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+ return ret;
+}
+
+static void osd101t2587_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
+
+ osd101t2587_panel_disable(&osd101t2587->base);
+ osd101t2587_panel_unprepare(&osd101t2587->base);
+}
+
+static struct mipi_dsi_driver osd101t2587_panel_driver = {
+ .driver = {
+ .name = "panel-osd-osd101t2587-53ts",
+ .of_match_table = osd101t2587_of_match,
+ },
+ .probe = osd101t2587_panel_probe,
+ .remove = osd101t2587_panel_remove,
+ .shutdown = osd101t2587_panel_shutdown,
+};
+module_mipi_dsi_driver(osd101t2587_panel_driver);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_DESCRIPTION("OSD101T2587-53TS DSI panel");
+MODULE_LICENSE("GPL v2");
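/*
 * Illustrative sketch (hypothetical caller, not from this patch): the
 * prepare/enable hooks implemented above are driven by the display
 * controller through the drm_panel API in this order; teardown mirrors
 * it with drm_panel_disable() followed by drm_panel_unprepare().
 */
static int foo_panel_on(struct drm_panel *panel)
{
	int ret;

	ret = drm_panel_prepare(panel);	/* regulator on */
	if (ret)
		return ret;

	ret = drm_panel_enable(panel);	/* peripheral on + backlight */
	if (ret)
		drm_panel_unprepare(panel);

	return ret;
}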
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index cb4dfb98be0f..045df41dbde2 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -19,17 +19,18 @@
*/
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
+#include <video/mipi_display.h>
+
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
-#include <video/mipi_display.h>
-
/*
* When power is turned off to this panel a minimum off time of 500ms has to be
* observed before powering back on as there's no external reset pin. Keep
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 2c9c9722734f..28c0620dfe0f 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -53,9 +53,8 @@
#include <linux/of_graph.h>
#include <linux/pm.h>
-#include <drm/drm_panel.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 14186827e591..ba889625ad43 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -7,14 +7,17 @@
*/
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
-#include <drm/drmP.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
/*** Manufacturer Command Set ***/
#define MCS_CMD_MODE_SW 0xFE /* CMD Mode Switch */
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
index d88ea8da2ec2..6dcb692c4701 100644
--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -257,20 +257,12 @@ static int allpixelson_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(allpixelson_fops, NULL,
allpixelson_set, "%llu\n");
-static int jh057n_debugfs_init(struct jh057n *ctx)
+static void jh057n_debugfs_init(struct jh057n *ctx)
{
- struct dentry *f;
-
ctx->debugfs = debugfs_create_dir(DRV_NAME, NULL);
- if (!ctx->debugfs)
- return -ENOMEM;
- f = debugfs_create_file("allpixelson", 0600,
- ctx->debugfs, ctx, &allpixelson_fops);
- if (!f)
- return -ENOMEM;
-
- return 0;
+ debugfs_create_file("allpixelson", 0600, ctx->debugfs, ctx,
+ &allpixelson_fops);
}
static void jh057n_debugfs_remove(struct jh057n *ctx)
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3cf4cf6a6942..5c2a1cae603b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -11,10 +11,10 @@
* published by the Free Software Foundation.
*/
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
-
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -22,6 +22,10 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
/* Manufacturer Command Set */
#define MCS_MANPWR 0xb0
#define MCS_ELVSS_ON 0xb1
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 797bbc7a264e..351eee951648 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -11,14 +11,18 @@
* published by the Free Software Foundation.
*/
-#include <drm/drmP.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
#define S6E3HA2_MIN_BRIGHTNESS 0
#define S6E3HA2_MAX_BRIGHTNESS 100
#define S6E3HA2_DEFAULT_BRIGHTNESS 80
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index aeb32aa58899..19ea325a0e9b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -11,14 +11,19 @@
* published by the Free Software Foundation.
*/
-#include <drm/drmP.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
#include <linux/regulator/consumer.h>
+
#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
#define MCS_LEVEL2_KEY 0xf0
#define MCS_MTP_KEY 0xf1
#define MCS_MTP_SET3 0xd4
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
new file mode 100644
index 000000000000..142d395ea512
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * S6E63M0 AMOLED LCD drm_panel driver.
+ *
+ * Copyright (C) 2019 Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>
+ * Derived from drivers/gpu/drm/panel-samsung-ld9040.c
+ *
+ * Andrzej Hajda <a.hajda@samsung.com>
+ */
+
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+/* Manufacturer Command Set */
+#define MCS_ELVSS_ON 0xb1
+#define MCS_MIECTL1 0xc0
+#define MCS_BCMODE 0xc1
+#define MCS_DISCTL 0xf2
+#define MCS_SRCCTL 0xf6
+#define MCS_IFCTL 0xf7
+#define MCS_PANELCTL 0xF8
+#define MCS_PGAMMACTL 0xfa
+
+#define NUM_GAMMA_LEVELS 11
+#define GAMMA_TABLE_COUNT 23
+
+#define DATA_MASK 0x100
+
+#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1)
+
+/* array of gamma tables for gamma value 2.2 */
+static u8 const s6e63m0_gamma_22[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = {
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8,
+ 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7,
+ 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51 },
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0,
+ 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF,
+ 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82 },
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF,
+ 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC,
+ 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D },
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC,
+ 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9,
+ 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E },
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB,
+ 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8,
+ 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB },
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA,
+ 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6,
+ 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA },
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8,
+ 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4,
+ 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2 },
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9,
+ 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3,
+ 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA },
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6,
+ 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2,
+ 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1 },
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6,
+ 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1,
+ 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6 },
+ { MCS_PGAMMACTL, 0x00,
+ 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6,
+ 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0,
+ 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb },
+};
+
+struct s6e63m0 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct backlight_device *bl_dev;
+
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+ bool enabled;
+
+ /*
+ * This field is checked by the functions that access the bus directly,
+ * and a transfer is skipped when it is already set. On a transfer
+ * failure or an unexpected response it is set to the error value. This
+ * lets higher-level functions issue whole command sequences without
+ * checking every single call.
+ */
+ int error;
+};
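/*
 * Illustrative sketch of the error-latching scheme described above
 * (hypothetical helper, not part of the driver; it assumes the SPI
 * accessor s6e63m0_spi_write_word() defined further down this file):
 * every bus write bails out once ctx->error is set and records the first
 * failure, so a whole command sequence can be issued and the outcome
 * checked once at the end via s6e63m0_clear_error().
 */
static void foo_latched_write(struct s6e63m0 *ctx, u16 word)
{
	if (ctx->error < 0)
		return;		/* an earlier transfer already failed */

	ctx->error = s6e63m0_spi_write_word(ctx, word);
}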
+
+static const struct drm_display_mode default_mode = {
+ .clock = 25628,
+ .hdisplay = 480,
+ .hsync_start = 480 + 16,
+ .hsync_end = 480 + 16 + 2,
+ .htotal = 480 + 16 + 2 + 16,
+ .vdisplay = 800,
+ .vsync_start = 800 + 28,
+ .vsync_end = 800 + 28 + 2,
+ .vtotal = 800 + 28 + 2 + 1,
+ .vrefresh = 60,
+ .width_mm = 53,
+ .height_mm = 89,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static inline struct s6e63m0 *panel_to_s6e63m0(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e63m0, panel);
+}
+
+static int s6e63m0_clear_error(struct s6e63m0 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
+static int s6e63m0_spi_write_word(struct s6e63m0 *ctx, u16 data)
+{
+ struct spi_device *spi = to_spi_device(ctx->dev);
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = &data,
+ };
+ struct spi_message msg;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(spi, &msg);
+}
+
+static void s6e63m0_dcs_write(struct s6e63m0 *ctx, const u8 *data, size_t len)
+{
+ int ret = 0;
+
+ if (ctx->error < 0 || len == 0)
+ return;
+
+ DRM_DEV_DEBUG(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
+ ret = s6e63m0_spi_write_word(ctx, *data);
+
+ while (!ret && --len) {
+ ++data;
+ ret = s6e63m0_spi_write_word(ctx, *data | DATA_MASK);
+ }
+
+ if (ret) {
+ DRM_DEV_ERROR(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
+ (int)len, data);
+ ctx->error = ret;
+ }
+
+ usleep_range(300, 310);
+}
+
+#define s6e63m0_dcs_write_seq_static(ctx, seq ...) \
+ ({ \
+ static const u8 d[] = { seq }; \
+ s6e63m0_dcs_write(ctx, d, ARRAY_SIZE(d)); \
+ })
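/*
 * Illustrative note on the wire format used by the helpers above (this
 * small helper is not part of the driver): the panel is driven over
 * 3-wire SPI with 9 bits per word, and bit 8 is the data/command flag.
 * s6e63m0_dcs_write() sends the first byte of a sequence with bit 8
 * clear (command byte) and every following byte with DATA_MASK (0x100)
 * set (parameter byte).
 */
static inline u16 s6e63m0_9bit_word(u8 byte, bool is_param)
{
	return byte | (is_param ? DATA_MASK : 0);
}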
+
+static void s6e63m0_init(struct s6e63m0 *ctx)
+{
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+ 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
+ 0x63, 0x86, 0x1a, 0x33, 0x0d, 0x00, 0x00);
+
+ s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL,
+ 0x02, 0x03, 0x1c, 0x10, 0x10);
+ s6e63m0_dcs_write_seq_static(ctx, MCS_IFCTL,
+ 0x03, 0x00, 0x00);
+
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL,
+ 0x00, 0x18, 0x08, 0x24, 0x64, 0x56, 0x33,
+ 0xb6, 0xba, 0xa8, 0xac, 0xb1, 0x9d, 0xc1,
+ 0xc1, 0xb7, 0x00, 0x9c, 0x00, 0x9f, 0x00,
+ 0xd6);
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL,
+ 0x01);
+
+ s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL,
+ 0x00, 0x8c, 0x07);
+ s6e63m0_dcs_write_seq_static(ctx, 0xb3,
+ 0xc);
+
+ s6e63m0_dcs_write_seq_static(ctx, 0xb5,
+ 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
+ 0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
+ 0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
+ 0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
+ 0x21, 0x20, 0x1e, 0x1e);
+
+ s6e63m0_dcs_write_seq_static(ctx, 0xb6,
+ 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
+ 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66);
+
+ s6e63m0_dcs_write_seq_static(ctx, 0xb7,
+ 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
+ 0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
+ 0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
+ 0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
+ 0x21, 0x20, 0x1e, 0x1e, 0x00, 0x00, 0x11,
+ 0x22, 0x33, 0x44, 0x44, 0x44, 0x55, 0x55,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66);
+
+ s6e63m0_dcs_write_seq_static(ctx, 0xb9,
+ 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
+ 0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
+ 0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
+ 0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
+ 0x21, 0x20, 0x1e, 0x1e);
+
+ s6e63m0_dcs_write_seq_static(ctx, 0xba,
+ 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
+ 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66);
+
+ s6e63m0_dcs_write_seq_static(ctx, MCS_BCMODE,
+ 0x4d, 0x96, 0x1d, 0x00, 0x00, 0x01, 0xdf,
+ 0x00, 0x00, 0x03, 0x1f, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06,
+ 0x09, 0x0d, 0x0f, 0x12, 0x15, 0x18);
+
+ s6e63m0_dcs_write_seq_static(ctx, 0xb2,
+ 0x10, 0x10, 0x0b, 0x05);
+
+ s6e63m0_dcs_write_seq_static(ctx, MCS_MIECTL1,
+ 0x01);
+
+ s6e63m0_dcs_write_seq_static(ctx, MCS_ELVSS_ON,
+ 0x0b);
+
+ s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+}
+
+static int s6e63m0_power_on(struct s6e63m0 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ msleep(25);
+
+ gpiod_set_value(ctx->reset_gpio, 0);
+ msleep(120);
+
+ return 0;
+}
+
+static int s6e63m0_power_off(struct s6e63m0 *ctx)
+{
+ int ret;
+
+ gpiod_set_value(ctx->reset_gpio, 1);
+ msleep(120);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int s6e63m0_disable(struct drm_panel *panel)
+{
+ struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
+
+ if (!ctx->enabled)
+ return 0;
+
+ backlight_disable(ctx->bl_dev);
+
+ s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+ msleep(200);
+
+ ctx->enabled = false;
+
+ return 0;
+}
+
+static int s6e63m0_unprepare(struct drm_panel *panel)
+{
+ struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ s6e63m0_clear_error(ctx);
+
+ ret = s6e63m0_power_off(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int s6e63m0_prepare(struct drm_panel *panel)
+{
+ struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = s6e63m0_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ s6e63m0_init(ctx);
+
+ ret = s6e63m0_clear_error(ctx);
+
+ if (ret < 0)
+ s6e63m0_unprepare(panel);
+
+ ctx->prepared = true;
+
+ return ret;
+}
+
+static int s6e63m0_enable(struct drm_panel *panel)
+{
+ struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
+
+ if (ctx->enabled)
+ return 0;
+
+ s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
+
+ backlight_enable(ctx->bl_dev);
+
+ ctx->enabled = true;
+
+ return 0;
+}
+
+static int s6e63m0_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e63m0_drm_funcs = {
+ .disable = s6e63m0_disable,
+ .unprepare = s6e63m0_unprepare,
+ .prepare = s6e63m0_prepare,
+ .enable = s6e63m0_enable,
+ .get_modes = s6e63m0_get_modes,
+};
+
+static int s6e63m0_set_brightness(struct backlight_device *bd)
+{
+ struct s6e63m0 *ctx = bl_get_data(bd);
+
+ int brightness = bd->props.brightness;
+
+ /* disable gamma updates while loading the new gamma values */
+ s6e63m0_dcs_write(ctx, s6e63m0_gamma_22[brightness],
+ ARRAY_SIZE(s6e63m0_gamma_22[brightness]));
+
+ /* re-enable gamma updates to apply the new table */
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL, 0x01);
+
+ return s6e63m0_clear_error(ctx);
+}
+
+static const struct backlight_ops s6e63m0_backlight_ops = {
+ .update_status = s6e63m0_set_brightness,
+};
+
+static int s6e63m0_backlight_register(struct s6e63m0 *ctx)
+{
+ struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = MAX_BRIGHTNESS,
+ .max_brightness = MAX_BRIGHTNESS
+ };
+ struct device *dev = ctx->dev;
+ int ret = 0;
+
+ ctx->bl_dev = devm_backlight_device_register(dev, "panel", dev, ctx,
+ &s6e63m0_backlight_ops,
+ &props);
+ if (IS_ERR(ctx->bl_dev)) {
+ ret = PTR_ERR(ctx->bl_dev);
+ DRM_DEV_ERROR(dev, "error registering backlight device (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int s6e63m0_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct s6e63m0 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(struct s6e63m0), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ctx);
+
+ ctx->dev = dev;
+ ctx->enabled = false;
+ ctx->prepared = false;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset-gpios %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ spi->bits_per_word = 9;
+ spi->mode = SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "spi setup failed.\n");
+ return ret;
+ }
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &s6e63m0_drm_funcs;
+
+ ret = s6e63m0_backlight_register(ctx);
+ if (ret < 0)
+ return ret;
+
+ return drm_panel_add(&ctx->panel);
+}
+
+static int s6e63m0_remove(struct spi_device *spi)
+{
+ struct s6e63m0 *ctx = spi_get_drvdata(spi);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id s6e63m0_of_match[] = {
+ { .compatible = "samsung,s6e63m0" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s6e63m0_of_match);
+
+static struct spi_driver s6e63m0_driver = {
+ .probe = s6e63m0_probe,
+ .remove = s6e63m0_remove,
+ .driver = {
+ .name = "panel-samsung-s6e63m0",
+ .of_match_table = s6e63m0_of_match,
+ },
+};
+module_spi_driver(s6e63m0_driver);
+
+MODULE_AUTHOR("Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>");
+MODULE_DESCRIPTION("s6e63m0 LCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 6ad827b93ae1..0dcbda8310e3 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -15,17 +15,21 @@
* published by the Free Software Foundation.
*/
-#include <drm/drmP.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
-
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
#define LDI_MTP_LENGTH 24
#define GAMMA_LEVEL_NUM 25
#define GAMMA_TABLE_LEN 26
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index bdcc5d80823d..18b22b1294fb 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -7,17 +7,19 @@
*/
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_panel.h>
-
#include <video/display_timing.h>
#include <video/videomode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_panel.h>
+
struct seiko_panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index 02fc0f5423d4..1ac75a30e431 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -7,18 +7,19 @@
*/
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
+#include <video/mipi_display.h>
+
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
-#include <video/mipi_display.h>
-
struct sharp_panel {
struct drm_panel base;
/* the datasheet refers to them as DSI-LINK1 and DSI-LINK2 */
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index e5cae0050f52..89d7fc842972 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -19,18 +19,19 @@
*/
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
+#include <video/mipi_display.h>
+
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
-#include <video/mipi_display.h>
-
struct sharp_nt_panel {
struct drm_panel base;
struct mipi_dsi_device *dsi;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 569be4efd8d1..5a93c4edf1e4 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -22,20 +22,21 @@
*/
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
+#include <video/display_timing.h>
+#include <video/videomode.h>
+
#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
-#include <video/display_timing.h>
-#include <video/videomode.h>
-
struct panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
@@ -446,6 +447,32 @@ static const struct panel_desc ampire_am800480r3tmqwa1h = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct display_timing santek_st0700i5y_rbslw_f_timing = {
+ .pixelclock = { 26400000, 33300000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 45, 36, 6 },
+ .hsync_len = { 1, 10, 40 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 22, 13, 3 },
+ .vsync_len = { 1, 10, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE
+};
+
+static const struct panel_desc armadeus_st0700_adapt = {
+ .timings = &santek_st0700i5y_rbslw_f_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
static const struct drm_display_mode auo_b101aw03_mode = {
.clock = 51450,
.hdisplay = 1024,
@@ -1096,6 +1123,56 @@ static const struct panel_desc dlc_dlc1010gig = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct drm_display_mode edt_et035012dm6_mode = {
+ .clock = 6500,
+ .hdisplay = 320,
+ .hsync_start = 320 + 20,
+ .hsync_end = 320 + 20 + 30,
+ .htotal = 320 + 20 + 68,
+ .vdisplay = 240,
+ .vsync_start = 240 + 4,
+ .vsync_end = 240 + 4 + 4,
+ .vtotal = 240 + 4 + 4 + 14,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc edt_et035012dm6 = {
+ .modes = &edt_et035012dm6_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 70,
+ .height = 52,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
+static const struct drm_display_mode edt_etm0430g0dh6_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 41,
+ .htotal = 480 + 2 + 41 + 2,
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 10,
+ .vtotal = 272 + 2 + 10 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc edt_etm0430g0dh6 = {
+ .modes = &edt_etm0430g0dh6_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
+};
+
static const struct drm_display_mode edt_et057090dhu_mode = {
.clock = 25175,
.hdisplay = 640,
@@ -1160,6 +1237,33 @@ static const struct panel_desc edt_etm0700g0bdh6 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
+static const struct display_timing evervision_vgg804821_timing = {
+ .pixelclock = { 27600000, 33300000, 50000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 40, 66, 70 },
+ .hback_porch = { 40, 67, 70 },
+ .hsync_len = { 40, 67, 70 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 6, 10, 10 },
+ .vback_porch = { 7, 11, 11 },
+ .vsync_len = { 7, 11, 11 },
+ .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_NEGEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE,
+};
+
+static const struct panel_desc evervision_vgg804821 = {
+ .timings = &evervision_vgg804821_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 64,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
.clock = 32260,
.hdisplay = 800,
@@ -1184,6 +1288,29 @@ static const struct panel_desc foxlink_fl500wvr00_a0t = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode friendlyarm_hd702e_mode = {
+ .clock = 67185,
+ .hdisplay = 800,
+ .hsync_start = 800 + 20,
+ .hsync_end = 800 + 20 + 24,
+ .htotal = 800 + 20 + 24 + 20,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 4,
+ .vsync_end = 1280 + 4 + 8,
+ .vtotal = 1280 + 4 + 8 + 4,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc friendlyarm_hd702e = {
+ .modes = &friendlyarm_hd702e_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 94,
+ .height = 151,
+ },
+};
+
static const struct drm_display_mode giantplus_gpg482739qs5_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -1549,6 +1676,29 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
+static const struct display_timing koe_tx14d24vm1bpa_timing = {
+ .pixelclock = { 5580000, 5850000, 6200000 },
+ .hactive = { 320, 320, 320 },
+ .hfront_porch = { 30, 30, 30 },
+ .hback_porch = { 30, 30, 30 },
+ .hsync_len = { 1, 5, 17 },
+ .vactive = { 240, 240, 240 },
+ .vfront_porch = { 6, 6, 6 },
+ .vback_porch = { 5, 5, 5 },
+ .vsync_len = { 1, 2, 11 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc koe_tx14d24vm1bpa = {
+ .timings = &koe_tx14d24vm1bpa_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 115,
+ .height = 86,
+ },
+};
+
static const struct display_timing koe_tx31d200vm0baa_timing = {
.pixelclock = { 39600000, 43200000, 48000000 },
.hactive = { 1280, 1280, 1280 },
@@ -2355,6 +2505,31 @@ static const struct panel_desc starry_kr122ea0sra = {
},
};
+static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = {
+ .clock = 30000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 39,
+ .hsync_end = 800 + 39 + 47,
+ .htotal = 800 + 39 + 47 + 39,
+ .vdisplay = 480,
+ .vsync_start = 480 + 13,
+ .vsync_end = 480 + 13 + 2,
+ .vtotal = 480 + 13 + 2 + 29,
+ .vrefresh = 62,
+};
+
+static const struct panel_desc tfc_s9700rtwv43tr_01b = {
+ .modes = &tfc_s9700rtwv43tr_01b_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 155,
+ .height = 90,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
static const struct display_timing tianma_tm070jdhg30_timing = {
.pixelclock = { 62600000, 68200000, 78100000 },
.hactive = { 1280, 1280, 1280 },
@@ -2508,6 +2683,32 @@ static const struct panel_desc urt_umsh_8596md_parallel = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode vl050_8048nt_c01_mode = {
+ .clock = 33333,
+ .hdisplay = 800,
+ .hsync_start = 800 + 210,
+ .hsync_end = 800 + 210 + 20,
+ .htotal = 800 + 210 + 20 + 46,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 10,
+ .vtotal = 480 + 22 + 10 + 23,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc vl050_8048nt_c01 = {
+ .modes = &vl050_8048nt_c01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 120,
+ .height = 76,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
static const struct drm_display_mode winstar_wf35ltiacd_mode = {
.clock = 6410,
.hdisplay = 320,
@@ -2571,6 +2772,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "arm,rtsm-display",
.data = &arm_rtsm,
}, {
+ .compatible = "armadeus,st0700-adapt",
+ .data = &armadeus_st0700_adapt,
+ }, {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
@@ -2646,6 +2850,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "dlc,dlc1010gig",
.data = &dlc_dlc1010gig,
}, {
+ .compatible = "edt,et035012dm6",
+ .data = &edt_et035012dm6,
+ }, {
+ .compatible = "edt,etm0430g0dh6",
+ .data = &edt_etm0430g0dh6,
+ }, {
.compatible = "edt,et057090dhu",
.data = &edt_et057090dhu,
}, {
@@ -2661,9 +2871,15 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etm0700g0edh6",
.data = &edt_etm0700g0bdh6,
}, {
+ .compatible = "evervision,vgg804821",
+ .data = &evervision_vgg804821,
+ }, {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
+ .compatible = "friendlyarm,hd702e",
+ .data = &friendlyarm_hd702e,
+ }, {
.compatible = "giantplus,gpg482739qs5",
.data = &giantplus_gpg482739qs5
}, {
@@ -2706,6 +2922,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
+ .compatible = "koe,tx14d24vm1bpa",
+ .data = &koe_tx14d24vm1bpa,
+ }, {
.compatible = "koe,tx31d200vm0baa",
.data = &koe_tx31d200vm0baa,
}, {
@@ -2802,6 +3021,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "starry,kr122ea0sra",
.data = &starry_kr122ea0sra,
}, {
+ .compatible = "tfc,s9700rtwv43tr-01b",
+ .data = &tfc_s9700rtwv43tr_01b,
+ }, {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
@@ -2835,6 +3057,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "urt,umsh-8596md-20t",
.data = &urt_umsh_8596md_parallel,
}, {
+ .compatible = "vxt,vl050-8048nt-c01",
+ .data = &vl050_8048nt_c01,
+ }, {
.compatible = "winstar,wf35ltiacd",
.data = &winstar_wf35ltiacd,
}, {
@@ -3053,6 +3278,37 @@ static const struct panel_desc_dsi lg_acx467akm_7 = {
.lanes = 4,
};
+static const struct drm_display_mode osd101t2045_53ts_mode = {
+ .clock = 154500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 112,
+ .hsync_end = 1920 + 112 + 16,
+ .htotal = 1920 + 112 + 16 + 32,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 16,
+ .vsync_end = 1200 + 16 + 2,
+ .vtotal = 1200 + 16 + 2 + 16,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc_dsi osd101t2045_53ts = {
+ .desc = {
+ .modes = &osd101t2045_53ts_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_EOT_PACKET,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct of_device_id dsi_of_match[] = {
{
.compatible = "auo,b080uan01",
@@ -3073,6 +3329,9 @@ static const struct of_device_id dsi_of_match[] = {
.compatible = "lg,acx467akm-7",
.data = &lg_acx467akm_7
}, {
+ .compatible = "osddisplays,osd101t2045-53ts",
+ .data = &osd101t2045_53ts
+ }, {
/* sentinel */
}
};
@@ -3098,7 +3357,14 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = desc->format;
dsi->lanes = desc->lanes;
- return mipi_dsi_attach(dsi);
+ err = mipi_dsi_attach(dsi);
+ if (err) {
+ struct panel_simple *panel = dev_get_drvdata(&dsi->dev);
+
+ drm_panel_remove(&panel->base);
+ }
+
+ return err;
}
static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
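The error path added above fixes an unbalanced registration: by the time mipi_dsi_attach() runs, panel_simple_probe() has already called drm_panel_add(), so a failed attach would otherwise leave a stale entry on the global panel list. The general shape of the fix, sketched:

	err = drm_panel_add(&panel->base);
	if (err < 0)
		return err;

	err = mipi_dsi_attach(dsi);
	if (err) {
		/* undo the registration so no dangling panel remains */
		drm_panel_remove(&panel->base);
	}

	return err;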
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 63f9a1c7fb1b..09c5d9a6f9fa 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -305,9 +305,9 @@ static const struct drm_display_mode ts8550b_mode = {
.htotal = 480 + 38 + 12 + 12,
.vdisplay = 854,
- .vsync_start = 854 + 4,
- .vsync_end = 854 + 4 + 8,
- .vtotal = 854 + 4 + 8 + 18,
+ .vsync_start = 854 + 18,
+ .vsync_end = 854 + 18 + 8,
+ .vtotal = 854 + 18 + 8 + 4,
.width_mm = 69,
.height_mm = 139,
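The TS8550B change above swaps the vertical front and back porch lengths; the totals are unchanged, so the refresh rate was always right and the error easy to miss:

	/*
	 * old: vsync_start = 854 +  4 = 858, vsync_end = 866, vtotal = 866 + 18 = 884
	 * new: vsync_start = 854 + 18 = 872, vsync_end = 880, vtotal = 880 +  4 = 884
	 *
	 * Same 884-line vtotal either way; only where the sync pulse sits
	 * inside the blanking interval moves.
	 */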
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 74284e5afc5d..bf85a8fa9ad0 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -6,15 +6,19 @@
* 2 as published by the Free Software Foundation.
*/
+#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
-
#include <video/mipi_display.h>
+#include <drm/drm_device.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
#define ST7789V_COLMOD_RGB_FMT_18BITS (6 << 4)
#define ST7789V_COLMOD_CTRL_FMT_18BITS (6 << 0)
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index fc2a66c53db4..77e1311b7c69 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -3,11 +3,10 @@
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_mipi_dsi.h>
-
+#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
@@ -15,6 +14,11 @@
#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
static const char * const regulator_names[] = {
"vdda",
"vdispp",
@@ -280,6 +284,7 @@ static int truly_35597_power_on(struct truly_nt35597 *ctx)
gpiod_set_value(ctx->reset_gpio, 1);
usleep_range(10000, 20000);
gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(10000, 20000);
return 0;
}
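The added usleep_range() lets the controller finish coming out of reset before the first DCS command is sent; panels generally specify a minimum settle time after the reset line is released. The full sequence, as it now reads (polarity is handled by gpiod, so 1 asserts reset):

	gpiod_set_value(ctx->reset_gpio, 1);	/* assert reset */
	usleep_range(10000, 20000);
	gpiod_set_value(ctx->reset_gpio, 0);	/* release reset */
	usleep_range(10000, 20000);		/* new: wait out the power-on reset */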
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 3b2bced1b015..ccb8eb2a518c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -55,11 +55,33 @@ static int panfrost_clk_init(struct panfrost_device *pfdev)
if (err)
return err;
+ pfdev->bus_clock = devm_clk_get_optional(pfdev->dev, "bus");
+ if (IS_ERR(pfdev->bus_clock)) {
+ dev_err(pfdev->dev, "get bus_clock failed %ld\n",
+ PTR_ERR(pfdev->bus_clock));
+ return PTR_ERR(pfdev->bus_clock);
+ }
+
+ if (pfdev->bus_clock) {
+ rate = clk_get_rate(pfdev->bus_clock);
+ dev_info(pfdev->dev, "bus_clock rate = %lu\n", rate);
+
+ err = clk_prepare_enable(pfdev->bus_clock);
+ if (err)
+ goto disable_clock;
+ }
+
return 0;
+
+disable_clock:
+ clk_disable_unprepare(pfdev->clock);
+
+ return err;
}
static void panfrost_clk_fini(struct panfrost_device *pfdev)
{
+ clk_disable_unprepare(pfdev->bus_clock);
clk_disable_unprepare(pfdev->clock);
}
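devm_clk_get_optional() is what makes this work: it returns NULL rather than an error when no "bus" clock is described in the device tree, and an ERR_PTR only on a real failure such as -EPROBE_DEFER. Since the clk API treats a NULL clock as a no-op, panfrost_clk_fini() needs no guard either. The pattern, sketched:

	struct clk *bus_clk;
	int err;

	bus_clk = devm_clk_get_optional(dev, "bus");
	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);	/* genuine error, not "absent" */

	/* bus_clk may be NULL here; clk_prepare_enable(NULL) returns 0 */
	err = clk_prepare_enable(bus_clk);
	if (err)
		return err;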
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 56f452dfb490..8074f221034b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -66,6 +66,7 @@ struct panfrost_device {
void __iomem *iomem;
struct clk *clock;
+ struct clk *bus_clock;
struct regulator *regulator;
struct reset_control *rstc;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index a5716c8fe8b3..9bb9260d9181 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -387,7 +387,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
mutex_lock(&pfdev->reset_lock);
for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_stop(&pfdev->js->queue[i].sched);
+ drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
if (sched_job)
drm_sched_increase_karma(sched_job);
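drm_sched_stop() grew a second argument naming the hung job so the scheduler can single it out during recovery. The complete timeout sequence drivers of this vintage follow, sketched from the panfrost handler with the reset step elided:

	/* park every queue, blaming the job that timed out */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* ... reset the GPU ... */

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);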
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 30f85f0130cb..49f9a9385393 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -256,7 +256,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL);
+ !no_intr, NULL, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index b91af1bf531b..138af32480d4 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -29,13 +29,21 @@
* Gareth Hughes <gareth@valinux.com>
*/
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/uaccess.h>
-#include <drm/drmP.h>
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_print.h>
#include <drm/r128_drm.h>
+
#include "r128_drv.h"
#define R128_FIFO_DEBUG 0
@@ -85,7 +93,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t *dev_priv)
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
return 0;
- DRM_UDELAY(1);
+ udelay(1);
}
#if R128_FIFO_DEBUG
@@ -102,7 +110,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t *dev_priv, int entries)
int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
if (slots >= entries)
return 0;
- DRM_UDELAY(1);
+ udelay(1);
}
#if R128_FIFO_DEBUG
@@ -124,7 +132,7 @@ static int r128_do_wait_for_idle(drm_r128_private_t *dev_priv)
r128_do_pixcache_flush(dev_priv);
return 0;
}
- DRM_UDELAY(1);
+ udelay(1);
}
#if R128_FIFO_DEBUG
@@ -211,7 +219,7 @@ int r128_do_cce_idle(drm_r128_private_t *dev_priv)
return r128_do_pixcache_flush(dev_priv);
}
}
- DRM_UDELAY(1);
+ udelay(1);
}
#if R128_FIFO_DEBUG
@@ -838,7 +846,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev)
return buf;
}
}
- DRM_UDELAY(1);
+ udelay(1);
}
DRM_DEBUG("returning NULL!\n");
@@ -870,7 +878,7 @@ int r128_wait_ring(drm_r128_private_t *dev_priv, int n)
r128_update_ring_snapshot(dev_priv);
if (ring->space >= n)
return 0;
- DRM_UDELAY(1);
+ udelay(1);
}
/* FIXME: This is being ignored... */
@@ -916,7 +924,7 @@ int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_p
*/
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- DRM_CURRENTPID, d->send_count);
+ task_pid_nr(current), d->send_count);
return -EINVAL;
}
@@ -924,7 +932,7 @@ int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_p
*/
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- DRM_CURRENTPID, d->request_count, dma->buf_count);
+ task_pid_nr(current), d->request_count, dma->buf_count);
return -EINVAL;
}
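DRM_UDELAY() was a one-line wrapper around udelay() left over from DRM's old OS-abstraction layer; every hunk in these legacy drivers simply substitutes the native call. The busy-wait idiom it sits in is the same throughout, e.g. the pixcache flush above:

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
			return 0;
		udelay(1);	/* poll at roughly 1 us granularity */
	}
	return -EBUSY;	/* timed out */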
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 4b1a505ab353..fd74f744604f 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -31,11 +31,14 @@
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_pciids.h>
+#include <drm/drm_vblank.h>
#include <drm/r128_drm.h>
-#include "r128_drv.h"
-#include <drm/drm_pciids.h>
+#include "r128_drv.h"
static struct pci_device_id pciidlist[] = {
r128_PCI_IDS
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 2de40d276116..ba8c30ed91d1 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -35,8 +35,14 @@
#ifndef __R128_DRV_H__
#define __R128_DRV_H__
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+
#include <drm/ati_pcigart.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
+#include <drm/r128_drm.h>
/* General customization:
*/
@@ -397,10 +403,10 @@ extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
#define R128_PCIGART_TABLE_SIZE 32768
-#define R128_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
-#define R128_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
-#define R128_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
-#define R128_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
+#define R128_READ(reg) readl(((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define R128_WRITE(reg, val) writel(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define R128_READ8(reg) readb(((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define R128_WRITE8(reg, val) writeb(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
#define R128_WRITE_PLL(addr, val) \
do { \
@@ -445,7 +451,7 @@ do { \
r128_update_ring_snapshot(dev_priv); \
if (ring->space >= ring->high_mark) \
goto __ring_space_done; \
- DRM_UDELAY(1); \
+ udelay(1); \
} \
DRM_ERROR("ring space check failed!\n"); \
return -EBUSY; \
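The rewritten accessors spell out what DRM_READ32()/DRM_WRITE32() used to hide: dev_priv->mmio is a legacy drm_local_map whose ->handle is the ioremapped register base, so plain readl()/writel() at base + offset do the job. An equivalent inline form, for readability:

	static inline u32 r128_read32(drm_r128_private_t *dev_priv, u32 reg)
	{
		return readl(((void __iomem *)dev_priv->mmio->handle) + reg);
	}

	static inline void r128_write32(drm_r128_private_t *dev_priv, u32 reg, u32 val)
	{
		writel(val, ((void __iomem *)dev_priv->mmio->handle) + reg);
	}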
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index b9bfa806d346..9d74c9d914cb 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -28,8 +28,15 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <drm/drmP.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/r128_drm.h>
+
#include "r128_drv.h"
/* ================================================================
@@ -824,7 +831,7 @@ static int r128_cce_dispatch_blit(struct drm_device *dev,
if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->file_priv);
+ task_pid_nr(current), buf->file_priv);
return -EINVAL;
}
if (buf->pending) {
@@ -1317,7 +1324,7 @@ static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *
DEV_INIT_TEST_WITH_RETURN(dev_priv);
DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
- DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
+ task_pid_nr(current), vertex->idx, vertex->count, vertex->discard);
if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
@@ -1338,7 +1345,7 @@ static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *
if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->file_priv);
+ task_pid_nr(current), buf->file_priv);
return -EINVAL;
}
if (buf->pending) {
@@ -1369,7 +1376,7 @@ static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file
DEV_INIT_TEST_WITH_RETURN(dev_priv);
- DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
+ DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", task_pid_nr(current),
elts->idx, elts->start, elts->end, elts->discard);
if (elts->idx < 0 || elts->idx >= dma->buf_count) {
@@ -1391,7 +1398,7 @@ static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file
if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->file_priv);
+ task_pid_nr(current), buf->file_priv);
return -EINVAL;
}
if (buf->pending) {
@@ -1432,7 +1439,7 @@ static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *fi
DEV_INIT_TEST_WITH_RETURN(dev_priv);
- DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
+ DRM_DEBUG("pid=%d index=%d\n", task_pid_nr(current), blit->idx);
if (blit->idx < 0 || blit->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
@@ -1532,7 +1539,7 @@ static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file
if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->file_priv);
+ task_pid_nr(current), buf->file_priv);
return -EINVAL;
}
if (buf->pending) {
@@ -1579,7 +1586,7 @@ int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv
DEV_INIT_TEST_WITH_RETURN(dev_priv);
- DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+ DRM_DEBUG("pid=%d\n", task_pid_nr(current));
switch (param->param) {
case R128_PARAM_IRQ_NR:
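DRM_CURRENTPID was likewise just task_pid_nr(current); the open-coded form makes it obvious that these messages log the pid of the process whose ioctl is executing:

	pid_t pid = task_pid_nr(current);	/* task in whose context we run */

	DRM_DEBUG("pid=%d\n", pid);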
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index ac98ad561870..2c27627b6659 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -25,8 +25,10 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
+
#include <asm/unaligned.h>
+#include <drm/drm_device.h>
#include <drm/drm_util.h>
#define ATOM_DEBUG
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 6d014ddb6b78..364b895e7ebb 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -26,7 +26,6 @@
#define ATOM_H
#include <linux/types.h>
-#include <drm/drmP.h>
#define ATOM_BIOS_MAGIC 0xAA55
#define ATOM_ATI_MAGIC_PTR 0x30
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d75ae17ff3ad..da2c9e295408 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -23,11 +23,14 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/radeon_drm.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+#include <drm/radeon_drm.h>
+
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 3e798593e042..6f38375c77c8 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -24,7 +24,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e67ed383e11b..cc8f32a1b03c 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -23,15 +23,19 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
+#include <linux/backlight.h>
+#include <linux/dmi.h>
+
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
+
+#include "atom.h"
#include "radeon.h"
-#include "radeon_audio.h"
#include "radeon_asic.h"
-#include "atom.h"
-#include <linux/backlight.h>
-#include <linux/dmi.h>
+#include "radeon_audio.h"
extern int atom_debug;
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 9022e9af11a0..a570ce40af19 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*
*/
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0aef4937c901..ce37de020b91 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -22,15 +22,17 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
-#include "radeon.h"
-#include "radeon_asic.h"
+#include <linux/seq_file.h>
+
+#include <drm/drm_pci.h>
+
+#include "atom.h"
+#include "btc_dpm.h"
#include "btcd.h"
-#include "r600_dpm.h"
#include "cypress_dpm.h"
-#include "btc_dpm.h"
-#include "atom.h"
-#include <linux/seq_file.h>
+#include "r600_dpm.h"
+#include "radeon.h"
+#include "radeon_asic.h"
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 3b6f12b7760b..ec4cbb4aa77c 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -23,6 +23,9 @@
#ifndef __BTC_DPM_H__
#define __BTC_DPM_H__
+#include "radeon.h"
+#include "rv770_dpm.h"
+
#define BTC_RLP_UVD_DFLT 20
#define BTC_RMP_UVD_DFLT 50
#define BTC_LHP_UVD_DFLT 50
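btc_dpm.h — like ci_dpm.h, r600_dpm.h, r100_track.h and the clearstate_*.h tables below — now includes what it uses instead of relying on every includer having pulled in radeon.h first, the standard recipe for a header that survives being included on its own. The shape, sketched with a hypothetical header:

	/* foo_dpm.h -- illustrative, not a real radeon header */
	#ifndef __FOO_DPM_H__
	#define __FOO_DPM_H__

	#include "radeon.h"	/* struct radeon_device in the prototypes below */

	int foo_dpm_enable(struct radeon_device *rdev);

	#endif /* __FOO_DPM_H__ */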
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index a12439266bb0..c6fd123f60b5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -22,15 +22,17 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/seq_file.h>
+
+#include <drm/drm_pci.h>
+
+#include "atom.h"
+#include "ci_dpm.h"
+#include "cikd.h"
+#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"
-#include "cikd.h"
-#include "r600_dpm.h"
-#include "ci_dpm.h"
-#include "atom.h"
-#include <linux/seq_file.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
index dff2a63df38f..ac12db5f2cf7 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.h
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -24,6 +24,7 @@
#define __CI_DPM_H__
#include "ppsmc.h"
+#include "radeon.h"
#define SMU__NUM_SCLK_DPM_STATE 8
#define SMU__NUM_MCLK_DPM_LEVELS 6
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 371121913756..f4a1ba567f21 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "cikd.h"
#include "ppsmc.h"
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ab7b4e2ffcd2..40f4d29edfe2 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -21,18 +21,22 @@
*
* Authors: Alex Deucher
*/
+
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_pci.h>
+#include <drm/drm_vblank.h>
+
+#include "atom.h"
+#include "cik_blit_shaders.h"
+#include "cikd.h"
+#include "clearstate_ci.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
-#include "cikd.h"
-#include "atom.h"
-#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
-#include "clearstate_ci.h"
#define SH_MEM_CONFIG_GFX_DEFAULT \
ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
@@ -3480,7 +3484,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
@@ -3825,7 +3829,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 9c351dc8a9e0..589217a7e435 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_ucode.h"
#include "radeon_asic.h"
@@ -677,7 +677,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
@@ -751,7 +751,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index e48a14037b76..4774e04c4da6 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -21,6 +21,8 @@
*
*/
+#include "clearstate_defs.h"
+
static const u32 SECT_CONTEXT_def_1[] =
{
0x00000000, // DB_RENDER_CONTROL
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
index f55d06664e31..c1b6c22dbed7 100644
--- a/drivers/gpu/drm/radeon/clearstate_ci.h
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -21,6 +21,8 @@
*
*/
+#include "clearstate_defs.h"
+
static const unsigned int ci_SECT_CONTEXT_def_1[] =
{
0x00000000, // DB_RENDER_CONTROL
diff --git a/drivers/gpu/drm/radeon/clearstate_si.h b/drivers/gpu/drm/radeon/clearstate_si.h
index 66e39cdb5cb0..356219c6c7f2 100644
--- a/drivers/gpu/drm/radeon/clearstate_si.h
+++ b/drivers/gpu/drm/radeon/clearstate_si.h
@@ -21,6 +21,8 @@
*
*/
+#include "clearstate_defs.h"
+
static const u32 si_SECT_CONTEXT_def_1[] =
{
0x00000000, // DB_RENDER_CONTROL
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 3eb7899a4035..32ed60f1048b 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -22,13 +22,14 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
-#include "radeon.h"
-#include "radeon_asic.h"
+#include <drm/drm_pci.h>
+
+#include "atom.h"
+#include "cypress_dpm.h"
#include "evergreend.h"
#include "r600_dpm.h"
-#include "cypress_dpm.h"
-#include "atom.h"
+#include "radeon.h"
+#include "radeon_asic.h"
#define SMC_RAM_END 0x8000
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index cfa3a84a2af0..e8fe239b9d79 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -21,7 +21,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/hdmi.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 367a916f364e..eec5d7a62738 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -21,7 +21,7 @@
*
*/
#include <linux/hdmi.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_audio.h"
#include "sid.h"
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 5712d63dca20..1d978a3d9c82 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -21,18 +21,22 @@
*
* Authors: Alex Deucher
*/
+
#include <linux/firmware.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
-#include "radeon.h"
-#include "radeon_asic.h"
-#include "radeon_audio.h"
+
+#include <drm/drm_pci.h>
+#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
-#include "evergreend.h"
+
#include "atom.h"
#include "avivod.h"
-#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"
+#include "evergreen_reg.h"
+#include "evergreend.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
#include "radeon_ucode.h"
#define DC_HPDx_CONTROL(x) (DC_HPD1_CONTROL + (x * 0xc))
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 1e14c6921454..c410cad28f19 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 96535aa8659c..5505a04ca402 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -21,7 +21,7 @@
*
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index f766c967a284..739336a48d08 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -25,7 +25,7 @@
* Rafał Miłecki
*/
#include <linux/hdmi.h>
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index f055d6ea3522..0d8d30b78f95 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -21,13 +21,15 @@
*
*/
-#include <drm/drmP.h>
-#include "radeon.h"
+#include <linux/seq_file.h>
+
+#include <drm/drm_pci.h>
+
#include "cikd.h"
-#include "r600_dpm.h"
#include "kv_dpm.h"
+#include "r600_dpm.h"
+#include "radeon.h"
#include "radeon_asic.h"
-#include <linux/seq_file.h>
#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
#define KV_MINIMUM_ENGINE_CLOCK 800
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index af60bd32a287..c0a59527e7b8 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "cikd.h"
#include "kv_dpm.h"
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 381b0255ff02..410f626a39d4 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -21,20 +21,23 @@
*
* Authors: Alex Deucher
*/
+
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
-#include "radeon.h"
-#include "radeon_asic.h"
-#include "radeon_audio.h"
+
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
-#include "nid.h"
+
#include "atom.h"
-#include "ni_reg.h"
#include "cayman_blit_shaders.h"
-#include "radeon_ucode.h"
#include "clearstate_cayman.h"
+#include "ni_reg.h"
+#include "nid.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
+#include "radeon_ucode.h"
/*
* Indirect registers accessor
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index ce787a9f12c0..c56136848360 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -21,7 +21,7 @@
*
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 0fd8d6ba9828..d9e62ca65ab8 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -21,16 +21,18 @@
*
*/
-#include <drm/drmP.h>
-#include "radeon.h"
-#include "radeon_asic.h"
-#include "nid.h"
-#include "r600_dpm.h"
-#include "ni_dpm.h"
-#include "atom.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
+#include <drm/drm_pci.h>
+
+#include "atom.h"
+#include "ni_dpm.h"
+#include "nid.h"
+#include "r600_dpm.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 7d39ed63e5be..5c05193da520 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -25,24 +25,30 @@
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
-#include "radeon_reg.h"
+
+#include "atom.h"
+#include "r100_reg_safe.h"
+#include "r100d.h"
#include "radeon.h"
#include "radeon_asic.h"
-#include "r100d.h"
+#include "radeon_reg.h"
+#include "rn50_reg_safe.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
-#include "atom.h"
-
-#include <linux/firmware.h>
-#include <linux/module.h>
-
-#include "r100_reg_safe.h"
-#include "rn50_reg_safe.h"
/* Firmware Names */
#define FIRMWARE_R100 "radeon/R100_cp.bin"
@@ -2470,7 +2476,7 @@ static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
if (tmp >= n) {
return 0;
}
- DRM_UDELAY(1);
+ udelay(1);
}
return -1;
}
@@ -2488,7 +2494,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev)
if (!(tmp & RADEON_RBBM_ACTIVE)) {
return 0;
}
- DRM_UDELAY(1);
+ udelay(1);
}
return -1;
}
@@ -2504,7 +2510,7 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
if (tmp & RADEON_MC_IDLE) {
return 0;
}
- DRM_UDELAY(1);
+ udelay(1);
}
return -1;
}
@@ -3669,7 +3675,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
if (tmp == 0xDEADBEEF) {
break;
}
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ring test succeeded in %d usecs\n", i);
@@ -3746,7 +3752,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
if (tmp == 0xDEADBEEF) {
break;
}
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ib test succeeded in %u usecs\n", i);
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 57e2b09784be..1b5ff3f816db 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: MIT */
+#include "radeon.h"
+
#define R100_TRACK_MAX_TEXTURE 3
#define R200_TRACK_MAX_TEXTURE 6
#define R300_TRACK_MAX_TEXTURE 16
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index c22321cc5a41..9ce6dd83d284 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 652126fd6dd4..44856e3a7108 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -25,19 +25,25 @@
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
-#include "radeon_reg.h"
-#include "radeon.h"
-#include "radeon_asic.h"
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
+
#include "r100_track.h"
+#include "r300_reg_safe.h"
#include "r300d.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_reg.h"
#include "rv350d.h"
-#include "r300_reg_safe.h"
/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380
*
@@ -350,7 +356,7 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
if (tmp & R300_MC_IDLE) {
return 0;
}
- DRM_UDELAY(1);
+ udelay(1);
}
return -1;
}
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 2318d9e3ed96..83282ee2bde0 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -25,16 +25,22 @@
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
-#include "radeon_reg.h"
-#include "radeon.h"
-#include "radeon_asic.h"
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
+
#include "atom.h"
#include "r100d.h"
-#include "r420d.h"
#include "r420_reg_safe.h"
+#include "r420d.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_reg.h"
void r420_pm_init_profile(struct radeon_device *rdev)
{
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 074cf752faef..fc78e64ae727 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
@@ -44,7 +44,7 @@ int r520_mc_wait_for_idle(struct radeon_device *rdev)
if (tmp & R520_MC_STATUS_IDLE) {
return 0;
}
- DRM_UDELAY(1);
+ udelay(1);
}
return -1;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e06e2d8feab3..7d175a9e8330 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -25,19 +25,25 @@
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
+
+#include "atom.h"
+#include "avivod.h"
+#include "r600d.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_mode.h"
-#include "r600d.h"
-#include "atom.h"
-#include "avivod.h"
#include "radeon_ucode.h"
/* Firmware Names */
@@ -2840,7 +2846,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
@@ -3433,7 +3439,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index c96b31950ca7..d6c28a5d77ab 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -26,7 +26,7 @@
* Jerome Glisse
*/
#include <linux/kernel.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index fb65e6fb5c4f..35d92ef8a0d4 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -21,7 +21,7 @@
*
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
@@ -261,7 +261,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,
tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
@@ -382,7 +382,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 5e044c98fca2..35b77c944701 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index bd499d749bc9..6e4d22ed2a00 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -23,6 +23,8 @@
#ifndef __R600_DPM_H__
#define __R600_DPM_H__
+#include "radeon.h"
+
#define R600_ASI_DFLT 10000
#define R600_BSP_DFLT 0x41EB
#define R600_BSU_DFLT 0x2
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index ab32830c4e23..c09549d785b5 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -25,7 +25,7 @@
*/
#include <linux/hdmi.h>
#include <linux/gcd.h>
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 224cc21bbe38..6cf1645e7a1a 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -21,18 +21,21 @@
*
*/
-#include <linux/pci.h>
#include <linux/acpi.h>
-#include <linux/slab.h>
-#include <linux/power_supply.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+
+#include <acpi/acpi_bus.h>
#include <acpi/video.h>
-#include <drm/drmP.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
+
+#include "atom.h"
#include "radeon.h"
#include "radeon_acpi.h"
-#include "atom.h"
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_atpx_dgpu_req_power_for_displays(void);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c77d349c561c..4de16f3badb4 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -24,10 +24,14 @@
* Dave Airlie
* Jerome Glisse <glisse@freedesktop.org>
*/
-#include <drm/drmP.h>
-#include "radeon.h"
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
+#include "radeon.h"
+
#if IS_ENABLED(CONFIG_AGP)
struct radeon_agpmode_quirk {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index bc5121d1a7bc..dc3c2227e06a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -27,14 +27,16 @@
*/
#include <linux/console.h>
-#include <drm/drmP.h>
+#include <linux/vgaarb.h>
+
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
-#include <linux/vgaarb.h>
-#include "radeon_reg.h"
+
+#include "atom.h"
#include "radeon.h"
#include "radeon_asic.h"
-#include "atom.h"
+#include "radeon_reg.h"
/*
* Registers accessors functions.
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f422a8d6aec4..226a7bf0eb7a 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -23,8 +23,11 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
+
#include "radeon.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 96f71114237a..b9aea5776d3d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -23,7 +23,7 @@
*/
#include <linux/gcd.h>
-#include <drm/drmP.h>
+
#include <drm/drm_crtc.h>
#include "radeon.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 87d5fb21cb61..7ce5064a59f6 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -21,7 +21,7 @@
*
* Authors: Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 04c0ed41374f..4d1490fbb075 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -25,13 +25,17 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
-#include "radeon_reg.h"
-#include "radeon.h"
-#include "atom.h"
#include <linux/slab.h>
#include <linux/acpi.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
+
+#include "atom.h"
+#include "radeon.h"
+#include "radeon_reg.h"
+
/*
* BIOS.
*/
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index e55146cdf543..9057b32f4498 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -25,12 +25,15 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
-#include "radeon_reg.h"
+
+#include "atom.h"
#include "radeon.h"
#include "radeon_asic.h"
-#include "atom.h"
+#include "radeon_reg.h"
/* 10 khz */
uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 60a61d33f607..c18ae15189f3 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -24,8 +24,11 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
+
#include "radeon.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index de1745adcccc..c60d1a44d22a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -23,7 +23,7 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f43305329939..cef0e697a2ea 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -24,11 +24,17 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
+
#include <linux/list_sort.h>
-#include <drm/drmP.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
-#include "radeon_reg.h"
+
#include "radeon.h"
+#include "radeon_reg.h"
#include "radeon_trace.h"
#define RADEON_CS_MAX_PRIORITY 32u
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 91952277557e..9180bb51b913 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -23,8 +23,10 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_device.h>
#include <drm/radeon_drm.h>
+
#include "radeon.h"
static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0a9312ea250a..dceb554e5674 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -25,17 +25,23 @@
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/console.h>
+#include <linux/efi.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/vgaarb.h>
+
+#include <drm/drm_cache.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_cache.h>
#include <drm/radeon_drm.h>
-#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
-#include <linux/vga_switcheroo.h>
-#include <linux/efi.h>
+
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 433df7036f96..bd52f15e6330 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -23,22 +23,27 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon.h"
-#include "atom.h"
+#include <linux/pm_runtime.h>
+#include <linux/gcd.h>
+
#include <asm/div64.h>
-#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_pci.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_edid.h>
+#include <drm/drm_vblank.h>
+#include <drm/radeon_drm.h>
-#include <linux/gcd.h>
+#include "atom.h"
+#include "radeon.h"
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 12eac4e75542..69379b95146e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -21,7 +21,7 @@
*
* Authors: Dave Airlie
*/
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "nid.h"
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 8d85540bbb43..2994f07fbad9 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: MIT
-#include <drm/drmP.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
-#include "radeon.h"
#include "atom.h"
#include "ni_reg.h"
+#include "radeon.h"
static struct radeon_encoder *radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 2e96c886392b..a6cbe11f79c6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -29,21 +29,26 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon_drv.h"
-#include <drm/drm_pciids.h>
+#include <linux/compat.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
-#include <linux/compat.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_pciids.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/radeon_drm.h>
+
+#include "radeon_drv.h"
/*
* KMS wrapper.
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c341fb2a5b56..a0c99087034a 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -23,9 +23,12 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
+
#include "radeon.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 1298b84cb1c7..2c564f4f3468 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -23,19 +23,20 @@
* Authors:
* David Airlie
*/
+
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/radeon_drm.h>
-#include "radeon.h"
-
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_pci.h>
+#include <drm/radeon_drm.h>
-#include <linux/vga_switcheroo.h>
+#include "radeon.h"
/* object hierarchy -
* this contains a helper + a radeon fb
@@ -125,6 +126,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
+ const struct drm_format_info *info;
struct radeon_device *rdev = rfbdev->rdev;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
@@ -135,7 +137,8 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
int height = mode_cmd->height;
u32 cpp;
- cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
+ info = drm_get_format_info(rdev->ddev, mode_cmd);
+ cpp = info->cpp[0];
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, cpp,
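For reference on the conversion above: the removed drm_format_plane_cpp() helper is replaced by a single drm_get_format_info() lookup, after which per-plane parameters are read from the returned struct drm_format_info. A minimal sketch of the same lookup, assuming a valid mode_cmd (the function name is illustrative):

	#include <linux/types.h>
	#include <drm/drm_fourcc.h>

	/* Derive the bytes-per-pixel of plane 0 from a framebuffer creation
	 * request. drm_get_format_info() falls back to the generic format
	 * table when the driver provides no get_format_info hook.
	 */
	static u32 example_cpp0(struct drm_device *dev,
				const struct drm_mode_fb_cmd2 *mode_cmd)
	{
		const struct drm_format_info *info;

		info = drm_get_format_info(dev, mode_cmd);
		return info->cpp[0];
	}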
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index e86f2bd38410..43f2f9307866 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -28,15 +28,21 @@
* Jerome Glisse <glisse@freedesktop.org>
* Dave Airlie
*/
-#include <linux/seq_file.h>
+
#include <linux/atomic.h>
-#include <linux/wait.h>
+#include <linux/firmware.h>
#include <linux/kref.h>
+#include <linux/sched/signal.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/firmware.h>
-#include <drm/drmP.h>
-#include "radeon_reg.h"
+#include <linux/wait.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+
#include "radeon.h"
+#include "radeon_reg.h"
#include "radeon_trace.h"
/*
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 1cef155cc933..d4d3778d0a98 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -25,7 +25,10 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
+#include <linux/vmalloc.h>
+
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 44617dec8183..d8bc5d2dfd61 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -25,8 +25,13 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
+
#include "radeon.h"
void radeon_gem_object_free(struct drm_gem_object *gobj)
@@ -559,7 +564,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
if (r)
goto error_free;
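The radeon call sites in this patch gain a fifth boolean argument to ttm_eu_reserve_buffers(). A sketch of the updated call, assuming the new trailing boolean controls whether the reserved BOs are deleted from the LRU (the call sites above pass true; the wrapper name is illustrative):

	#include <drm/ttm/ttm_execbuf_util.h>

	/* Reserve a list of buffer objects for a command submission. */
	static int example_reserve(struct ww_acquire_ctx *ticket,
				   struct list_head *head,
				   struct list_head *dups)
	{
		/* intr = true: interruptible waits; last arg: delete from LRU. */
		return ttm_eu_reserve_buffers(ticket, head, true, dups, true);
	}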
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 29f7817af821..d465a3de7732 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -23,11 +23,14 @@
* Authors: Dave Airlie
* Alex Deucher
*/
+
#include <linux/export.h>
-#include <drm/drmP.h>
+#include <drm/drm_device.h>
#include <drm/drm_edid.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
+
#include "radeon.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 92ce0e533bc0..9fd55e9c616b 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -26,7 +26,10 @@
* Jerome Glisse
* Christian König
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+
#include "radeon.h"
/*
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1d5e3ba7383e..d9613638f9cc 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -25,15 +25,21 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
+#include <linux/pm_runtime.h>
+
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
-#include "radeon_reg.h"
-#include "radeon.h"
+
#include "atom.h"
+#include "radeon.h"
+#include "radeon_reg.h"
-#include <linux/pm_runtime.h>
#define RADEON_WAIT_IDLE_TIMEOUT 200
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 6a8fb6fd183c..07f7ace42c4b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -25,15 +25,20 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vga_switcheroo.h>
+
#include <drm/drm_fb_helper.h>
-#include "radeon.h"
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>
-#include "radeon_asic.h"
-#include <linux/vga_switcheroo.h>
-#include <linux/slab.h>
-#include <linux/pm_runtime.h>
+#include "radeon.h"
+#include "radeon_asic.h"
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 35a205ae4318..a1985a552794 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -23,13 +23,16 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/radeon_drm.h>
#include <drm/drm_fixed.h>
-#include "radeon.h"
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+#include <drm/radeon_drm.h>
+
#include "atom.h"
+#include "radeon.h"
static void radeon_overscan_setup(struct drm_crtc *crtc,
struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 7e3257e8fd56..ef100b790463 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -23,14 +23,19 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
-#include <drm/drm_util.h>
+
+#include <linux/backlight.h>
+
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_util.h>
#include <drm/radeon_drm.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
-#include <linux/backlight.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 3dae2c4dec71..f132eec737ad 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: MIT
-#include <drm/drmP.h>
+
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+
#include "radeon.h"
/*
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index c9bd1278f573..8c3871ed23a9 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -31,7 +31,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 833e909706a9..21f73fc86f38 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -29,15 +29,18 @@
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
*/
+
+#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
+
#include <drm/drm_cache.h>
+#include <drm/drm_prime.h>
+#include <drm/radeon_drm.h>
+
#include "radeon.h"
#include "radeon_trace.h"
-
int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
@@ -539,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
if (unlikely(r != 0)) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4b6542538ff9..5d10e11a9225 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -20,14 +20,19 @@
* Authors: Rafał Miłecki <zajec5@gmail.com>
* Alex Deucher <alexdeucher@gmail.com>
*/
-#include <drm/drmP.h>
-#include "radeon.h"
-#include "avivod.h"
+
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/power_supply.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_vblank.h>
+
#include "atom.h"
+#include "avivod.h"
#include "r600_dpm.h"
-#include <linux/power_supply.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
+#include "radeon.h"
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 7110d403322c..d3a5bea9a2c5 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -23,12 +23,14 @@
*
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
-#include "radeon.h"
-#include <drm/radeon_drm.h>
#include <linux/dma-buf.h>
+#include <drm/drm_prime.h>
+#include <drm/radeon_drm.h>
+
+#include "radeon.h"
+
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 84802b201bef..37093cea24c5 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -26,7 +26,11 @@
* Jerome Glisse
* Christian König
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+
#include "radeon.h"
/*
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 197b157b73d0..310c322c7112 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -41,7 +41,7 @@
* If we are asked to block we wait on all the oldest fence of all
* rings. We just wait for any of those fence to complete.
*/
-#include <drm/drmP.h>
+
#include "radeon.h"
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index b0eb28e8fb73..221e59476f64 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -27,7 +27,7 @@
* Authors:
* Christian König <deathsimple@vodafone.de>
*/
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index be5d7a38d3aa..8c9780b5a884 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -28,7 +28,6 @@
* Christian König <christian.koenig@amd.com>
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 701c4a59e3c3..0f6ba81a1669 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -22,7 +22,7 @@
*
* Authors: Michel Dänzer
*/
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 0d84b8aafab3..c93f3ab3c4e3 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -3,10 +3,10 @@
#define _RADEON_TRACE_H_
#include <linux/stringify.h>
-#include <linux/types.h>
#include <linux/tracepoint.h>
+#include <linux/types.h>
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM radeon
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
index 65e92302f974..6806055e3b27 100644
--- a/drivers/gpu/drm/radeon/radeon_trace_points.c
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -2,7 +2,7 @@
/* Copyright Red Hat Inc 2010.
* Author : Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5d42f8d8e68d..1e5e744c16e7 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -29,19 +29,27 @@
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
*/
+
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/swiotlb.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_prime.h>
+#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/swiotlb.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-#include <linux/debugfs.h>
+#include <drm/ttm/ttm_placement.h>
+
#include "radeon_reg.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.c b/drivers/gpu/drm/radeon/radeon_ucode.c
index 6beec680390c..0d842d01f8e7 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.c
+++ b/drivers/gpu/drm/radeon/radeon_ucode.c
@@ -24,7 +24,7 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_ucode.h"
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 95f4db70dd22..ff4f794d1c86 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -30,7 +30,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index c1c619facb47..59db54ace428 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -27,7 +27,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
#include "radeon.h"
@@ -771,7 +771,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
for (i = 0; i < rdev->usec_timeout; i++) {
if (vce_v1_0_get_rptr(rdev, ring) != rptr)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 0d374211661c..8512b02e9583 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -25,7 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 4121209c183e..117f60af1ee4 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -25,9 +25,14 @@
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"
@@ -67,7 +72,7 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev)
tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
break;
- DRM_UDELAY(1);
+ udelay(1);
timeout--;
} while (timeout > 0);
WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
@@ -245,7 +250,7 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
if (tmp & RADEON_MC_IDLE) {
return 0;
}
- DRM_UDELAY(1);
+ udelay(1);
}
return -1;
}
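DRM_UDELAY() was a trivial wrapper around udelay(), so the conversions throughout this patch call udelay() directly after including <linux/delay.h>. A self-contained sketch of the polling idiom seen in rs400_mc_wait_for_idle(), with read_status and idle_bit standing in for the driver's register accessor and idle flag:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Busy-wait until a status bit is set, or time out. */
	static int example_wait_idle(u32 (*read_status)(void), u32 idle_bit,
				     unsigned int timeout_us)
	{
		while (timeout_us--) {
			if (read_status() & idle_bit)
				return 0;
			udelay(1);	/* was DRM_UDELAY(1) */
		}
		return -ETIMEDOUT;
	}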
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index f16af119c688..2f8ff089f7b1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -35,14 +35,19 @@
* close to the one of the R600 family (R600 likely being an evolution
* of the RS600 GART block).
*/
-#include <drm/drmP.h>
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_vblank.h>
+
+#include "atom.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
-#include "atom.h"
-#include "rs600d.h"
-
#include "rs600_reg_safe.h"
+#include "rs600d.h"
static void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1bae33e43f3c..267d8a9134c8 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -25,11 +25,13 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_pci.h>
+
+#include "atom.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
-#include "atom.h"
#include "rs690d.h"
int rs690_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 694b7b3e9799..72dbf3251c53 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -22,14 +22,16 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
+#include <linux/seq_file.h>
+
+#include <drm/drm_pci.h>
+
+#include "atom.h"
+#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"
-#include "rs780d.h"
-#include "r600_dpm.h"
#include "rs780_dpm.h"
-#include "atom.h"
-#include <linux/seq_file.h>
+#include "rs780d.h"
static struct igp_ps *rs780_get_ps(struct radeon_ps *rps)
{
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ffbd2c006f60..147e5cf8348d 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -25,14 +25,19 @@
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
-#include "rv515d.h"
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+
+#include "atom.h"
#include "radeon.h"
#include "radeon_asic.h"
-#include "atom.h"
#include "rv515_reg_safe.h"
+#include "rv515d.h"
/* This files gather functions specifics to: rv515 */
static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
@@ -138,7 +143,7 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
if (tmp & MC_STATUS_IDLE) {
return 0;
}
- DRM_UDELAY(1);
+ udelay(1);
}
return -1;
}
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 6986051fbb89..69d380fff22a 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rv6xxd.h"
diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c
index 38fdb4152e2a..84a3d6d72486 100644
--- a/drivers/gpu/drm/radeon/rv730_dpm.c
+++ b/drivers/gpu/drm/radeon/rv730_dpm.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "rv730d.h"
#include "r600_dpm.h"
diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
index afd597ec5085..327d65a76e1f 100644
--- a/drivers/gpu/drm/radeon/rv740_dpm.c
+++ b/drivers/gpu/drm/radeon/rv740_dpm.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "rv740d.h"
#include "r600_dpm.h"
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 729ae588c970..7a6fc66d6a40 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -25,16 +25,20 @@
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/firmware.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
+#include <drm/radeon_drm.h>
+
+#include "atom.h"
+#include "avivod.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
-#include <drm/radeon_drm.h>
#include "rv770d.h"
-#include "atom.h"
-#include "avivod.h"
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index acff6e09cc40..0866b38ef264 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -21,7 +21,7 @@
*
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "rv770d.h"
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index c765ae7ea806..4a0cf597c11c 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rv770d.h"
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index d12beab7f3e6..d81ccf153c33 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -23,6 +23,7 @@
#ifndef __RV770_DPM_H__
#define __RV770_DPM_H__
+#include "radeon.h"
#include "rv770_smc.h"
struct rv770_clock_registers {
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
index 2b7ddee3984c..45575c0d0a1d 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.c
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "rv770d.h"
#include "rv770_dpm.h"
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 841bc8bc333d..05894d198a79 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -21,19 +21,23 @@
*
* Authors: Alex Deucher
*/
+
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
-#include "radeon.h"
-#include "radeon_asic.h"
-#include "radeon_audio.h"
+
+#include <drm/drm_pci.h>
+#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
-#include "sid.h"
+
#include "atom.h"
-#include "si_blit_shaders.h"
#include "clearstate_si.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_audio.h"
#include "radeon_ucode.h"
+#include "si_blit_shaders.h"
+#include "sid.h"
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 83207929fc62..4773bb7d947e 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -21,7 +21,7 @@
*
* Authors: Alex Deucher
*/
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index c9f6cb77e857..460fd98e40a7 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -21,15 +21,17 @@
*
*/
-#include <drm/drmP.h>
+#include <linux/math64.h>
+#include <linux/seq_file.h>
+
+#include <drm/drm_pci.h>
+
+#include "atom.h"
+#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"
-#include "sid.h"
-#include "r600_dpm.h"
#include "si_dpm.h"
-#include "atom.h"
-#include <linux/math64.h>
-#include <linux/seq_file.h>
+#include "sid.h"
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 51155abda8d8..1573a463593c 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "sid.h"
#include "ppsmc.h"
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 1e4975f3374c..b95d5d390caf 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -21,7 +21,6 @@
*
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "sumod.h"
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
index 07dda299c784..f1651135a47a 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.h
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -24,6 +24,7 @@
#define __SUMO_DPM_H__
#include "atom.h"
+#include "radeon.h"
#define SUMO_MAX_HARDWARE_POWERLEVELS 5
#define SUMO_PM_NUMBER_OF_TC 15
diff --git a/drivers/gpu/drm/radeon/sumo_smc.c b/drivers/gpu/drm/radeon/sumo_smc.c
index cc051be42362..d78140705736 100644
--- a/drivers/gpu/drm/radeon/sumo_smc.c
+++ b/drivers/gpu/drm/radeon/sumo_smc.c
@@ -21,7 +21,6 @@
*
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "sumod.h"
#include "sumo_dpm.h"
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 5d317f763eea..65302f9d025e 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -21,13 +21,15 @@
*
*/
-#include <drm/drmP.h>
+#include <linux/seq_file.h>
+
+#include <drm/drm_pci.h>
+
+#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"
-#include "trinityd.h"
-#include "r600_dpm.h"
#include "trinity_dpm.h"
-#include <linux/seq_file.h>
+#include "trinityd.h"
#define TRINITY_MAX_DEEPSLEEP_DIVIDER_ID 5
#define TRINITY_MINIMUM_ENGINE_CLOCK 800
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index 0310e36e3159..f1770a5664ea 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -21,7 +21,6 @@
*
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "trinityd.h"
#include "trinity_dpm.h"
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 0dbeb504a429..f858d8d06347 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
@@ -438,7 +438,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
tmp = RREG32(UVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (i < rdev->usec_timeout) {
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 9071e656a565..23b18edda20e 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "rv770d.h"
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
index d722db2cf340..b83d0ecb3b5a 100644
--- a/drivers/gpu/drm/radeon/uvd_v3_1.c
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -22,7 +22,6 @@
* Authors: Christian König <christian.koenig@amd.com>
*/
-#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "nid.h"
diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c
index 91613b8a9dc9..dc54fa4aaea8 100644
--- a/drivers/gpu/drm/radeon/uvd_v4_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v4_2.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index f541a4b5ac51..bd75bbcf5bf6 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -26,7 +26,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
index b0a43b68776d..d6fde3659e65 100644
--- a/drivers/gpu/drm/radeon/vce_v2_0.c
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -26,7 +26,7 @@
*/
#include <linux/firmware.h>
-#include <drm/drmP.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 75ab17af13a9..6df37c2a9678 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -102,6 +102,35 @@ static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
},
};
+static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .channels_mask = BIT(2) | BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A774A1 has one RGB output, one LVDS output and one HDMI
+ * output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+ .dpll_mask = BIT(1),
+};
+
static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -386,6 +415,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
{ .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
+ { .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info },
{ .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 6c91753af7bc..0f00bdfe2366 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -16,6 +16,7 @@
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
+#include "rcar_lvds.h"
/* -----------------------------------------------------------------------------
* Encoder
@@ -97,6 +98,17 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
}
}
+ /*
+ * On Gen3, skip the LVDS1 output if the LVDS1 encoder is used as a
+ * companion for LVDS0 in dual-link mode.
+ */
+ if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) {
+ if (rcar_lvds_dual_link(bridge)) {
+ ret = -ENOLINK;
+ goto done;
+ }
+ }
+
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret < 0)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index f8f7fff34dff..2dc9caee8767 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -124,6 +124,66 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.bpp = 16,
.planes = 1,
}, {
+ .fourcc = DRM_FORMAT_RGBA4444,
+ .v4l2 = V4L2_PIX_FMT_RGBA444,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGBX4444,
+ .v4l2 = V4L2_PIX_FMT_RGBX444,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ABGR4444,
+ .v4l2 = V4L2_PIX_FMT_ABGR444,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XBGR4444,
+ .v4l2 = V4L2_PIX_FMT_XBGR444,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRA4444,
+ .v4l2 = V4L2_PIX_FMT_BGRA444,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRX4444,
+ .v4l2 = V4L2_PIX_FMT_BGRX444,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGBA5551,
+ .v4l2 = V4L2_PIX_FMT_RGBA555,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGBX5551,
+ .v4l2 = V4L2_PIX_FMT_RGBX555,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ABGR1555,
+ .v4l2 = V4L2_PIX_FMT_ABGR555,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XBGR1555,
+ .v4l2 = V4L2_PIX_FMT_XBGR555,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRA5551,
+ .v4l2 = V4L2_PIX_FMT_BGRA555,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRX5551,
+ .v4l2 = V4L2_PIX_FMT_BGRX555,
+ .bpp = 16,
+ .planes = 1,
+ }, {
.fourcc = DRM_FORMAT_BGR888,
.v4l2 = V4L2_PIX_FMT_RGB24,
.bpp = 24,
@@ -134,6 +194,26 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.bpp = 24,
.planes = 1,
}, {
+ .fourcc = DRM_FORMAT_RGBA8888,
+ .v4l2 = V4L2_PIX_FMT_BGRA32,
+ .bpp = 32,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGBX8888,
+ .v4l2 = V4L2_PIX_FMT_BGRX32,
+ .bpp = 32,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ABGR8888,
+ .v4l2 = V4L2_PIX_FMT_RGBA32,
+ .bpp = 32,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XBGR8888,
+ .v4l2 = V4L2_PIX_FMT_RGBX32,
+ .bpp = 32,
+ .planes = 1,
+ }, {
.fourcc = DRM_FORMAT_BGRA8888,
.v4l2 = V4L2_PIX_FMT_ARGB32,
.bpp = 32,
@@ -378,7 +458,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
}
ret = rcar_du_encoder_init(rcdu, output, entity);
- if (ret && ret != -EPROBE_DEFER)
+ if (ret && ret != -EPROBE_DEFER && ret != -ENOLINK)
dev_warn(rcdu->dev,
"failed to initialize encoder %pOF on output %u (%d), skipping\n",
entity, output, ret);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index 989a0be94131..ae07290bba6a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -14,6 +14,7 @@
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
+#include "rcar_du_writeback.h"
/**
* struct rcar_du_wb_conn_state - Driver-specific writeback connector state
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 620b51aab291..f2a5d4d99707 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -63,10 +63,12 @@ struct rcar_lvds {
struct clk *extal; /* External clock */
struct clk *dotclkin[2]; /* External DU clocks */
} clocks;
- bool enabled;
struct drm_display_mode display_mode;
enum rcar_lvds_mode mode;
+
+ struct drm_bridge *companion;
+ bool dual_link;
};
#define bridge_to_rcar_lvds(bridge) \
@@ -92,13 +94,15 @@ static int rcar_lvds_connector_get_modes(struct drm_connector *connector)
}
static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *state)
+ struct drm_atomic_state *state)
{
struct rcar_lvds *lvds = connector_to_rcar_lvds(connector);
const struct drm_display_mode *panel_mode;
+ struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
- if (!state->crtc)
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state->crtc)
return 0;
if (list_empty(&connector->modes)) {
@@ -110,9 +114,9 @@ static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
struct drm_display_mode, head);
/* We're not allowed to modify the resolution. */
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (!crtc_state)
+ return -EINVAL;
if (crtc_state->mode.hdisplay != panel_mode->hdisplay ||
crtc_state->mode.vdisplay != panel_mode->vdisplay)
@@ -368,15 +372,12 @@ int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq)
dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq);
- WARN_ON(lvds->enabled);
-
ret = clk_prepare_enable(lvds->clocks.mod);
if (ret < 0)
return ret;
__rcar_lvds_pll_setup_d3_e3(lvds, freq, true);
- lvds->enabled = true;
return 0;
}
EXPORT_SYMBOL_GPL(rcar_lvds_clk_enable);
@@ -390,13 +391,9 @@ void rcar_lvds_clk_disable(struct drm_bridge *bridge)
dev_dbg(lvds->dev, "disabling LVDS PLL\n");
- WARN_ON(!lvds->enabled);
-
rcar_lvds_write(lvds, LVDPLLCR, 0);
clk_disable_unprepare(lvds->clocks.mod);
-
- lvds->enabled = false;
}
EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable);
@@ -408,21 +405,18 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
const struct drm_display_mode *mode = &lvds->display_mode;
- /*
- * FIXME: We should really retrieve the CRTC through the state, but how
- * do we get a state pointer?
- */
- struct drm_crtc *crtc = lvds->bridge.encoder->crtc;
u32 lvdhcr;
u32 lvdcr0;
int ret;
- WARN_ON(lvds->enabled);
-
ret = clk_prepare_enable(lvds->clocks.mod);
if (ret < 0)
return;
+ /* Enable the companion LVDS encoder in dual-link mode. */
+ if (lvds->dual_link && lvds->companion)
+ lvds->companion->funcs->enable(lvds->companion);
+
/*
* Hardcode the channels and control signals routing for now.
*
@@ -445,17 +439,33 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) {
- /* Disable dual-link mode. */
- rcar_lvds_write(lvds, LVDSTRIPE, 0);
+ /*
+ * Configure vertical stripe based on the mode of operation of
+ * the connected device.
+ */
+ rcar_lvds_write(lvds, LVDSTRIPE,
+ lvds->dual_link ? LVDSTRIPE_ST_ON : 0);
}
- /* PLL clock configuration. */
- lvds->info->pll_setup(lvds, mode->clock * 1000);
+ /*
+ * PLL clock configuration on all instances but the companion in
+ * dual-link mode.
+ */
+ if (!lvds->dual_link || lvds->companion)
+ lvds->info->pll_setup(lvds, mode->clock * 1000);
/* Set the LVDS mode and select the input. */
lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
- if (drm_crtc_index(crtc) == 2)
- lvdcr0 |= LVDCR0_DUSEL;
+
+ if (lvds->bridge.encoder) {
+ /*
+ * FIXME: We should really retrieve the CRTC through the state,
+ * but how do we get a state pointer?
+ */
+ if (drm_crtc_index(lvds->bridge.encoder->crtc) == 2)
+ lvdcr0 |= LVDCR0_DUSEL;
+ }
+
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
/* Turn all the channels on. */
@@ -507,16 +517,12 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
drm_panel_prepare(lvds->panel);
drm_panel_enable(lvds->panel);
}
-
- lvds->enabled = true;
}
static void rcar_lvds_disable(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
- WARN_ON(!lvds->enabled);
-
if (lvds->panel) {
drm_panel_disable(lvds->panel);
drm_panel_unprepare(lvds->panel);
@@ -526,9 +532,11 @@ static void rcar_lvds_disable(struct drm_bridge *bridge)
rcar_lvds_write(lvds, LVDCR1, 0);
rcar_lvds_write(lvds, LVDPLLCR, 0);
- clk_disable_unprepare(lvds->clocks.mod);
+ /* Disable the companion LVDS encoder in dual-link mode. */
+ if (lvds->dual_link && lvds->companion)
+ lvds->companion->funcs->disable(lvds->companion);
- lvds->enabled = false;
+ clk_disable_unprepare(lvds->clocks.mod);
}
static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
@@ -592,8 +600,6 @@ static void rcar_lvds_mode_set(struct drm_bridge *bridge,
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
- WARN_ON(lvds->enabled);
-
lvds->display_mode = *adjusted_mode;
rcar_lvds_get_lvds_mode(lvds);
@@ -646,10 +652,57 @@ static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
.mode_set = rcar_lvds_mode_set,
};
+bool rcar_lvds_dual_link(struct drm_bridge *bridge)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+ return lvds->dual_link;
+}
+EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);
+
/* -----------------------------------------------------------------------------
* Probe & Remove
*/
+static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
+{
+ const struct of_device_id *match;
+ struct device_node *companion;
+ struct device *dev = lvds->dev;
+ int ret = 0;
+
+ /* Locate the companion LVDS encoder for dual-link operation, if any. */
+ companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
+ if (!companion) {
+ dev_err(dev, "Companion LVDS encoder not found\n");
+ return -ENXIO;
+ }
+
+ /*
+ * Sanity check: the companion encoder must have the same compatible
+ * string.
+ */
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!of_device_is_compatible(companion, match->compatible)) {
+ dev_err(dev, "Companion LVDS encoder is invalid\n");
+ ret = -ENXIO;
+ goto done;
+ }
+
+ lvds->companion = of_drm_find_bridge(companion);
+ if (!lvds->companion) {
+ ret = -EPROBE_DEFER;
+ goto done;
+ }
+
+ dev_dbg(dev, "Found companion encoder %pOF\n", companion);
+
+done:
+ of_node_put(companion);
+
+ return ret;
+}
+
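rcar_lvds_parse_dt_companion() follows the usual OF pattern: resolve a phandle, sanity-check the node, then translate it into a DRM object. A condensed sketch of the phandle-to-bridge step, using the property name from the hunk above (the function name is illustrative):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/of.h>
	#include <drm/drm_bridge.h>

	/* Resolve the companion encoder's bridge; -EPROBE_DEFER is returned
	 * until the companion's driver has registered its bridge.
	 */
	static struct drm_bridge *example_find_companion(struct device *dev)
	{
		struct device_node *np;
		struct drm_bridge *bridge;

		np = of_parse_phandle(dev->of_node, "renesas,companion", 0);
		if (!np)
			return ERR_PTR(-ENXIO);

		bridge = of_drm_find_bridge(np);
		of_node_put(np);

		return bridge ?: ERR_PTR(-EPROBE_DEFER);
	}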
static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
{
struct device_node *local_output = NULL;
@@ -700,14 +753,26 @@ static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
if (is_bridge) {
lvds->next_bridge = of_drm_find_bridge(remote);
- if (!lvds->next_bridge)
+ if (!lvds->next_bridge) {
ret = -EPROBE_DEFER;
+ goto done;
+ }
+
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK)
+ lvds->dual_link = lvds->next_bridge->timings
+ ? lvds->next_bridge->timings->dual_link
+ : false;
} else {
lvds->panel = of_drm_find_panel(remote);
- if (IS_ERR(lvds->panel))
+ if (IS_ERR(lvds->panel)) {
ret = PTR_ERR(lvds->panel);
+ goto done;
+ }
}
+ if (lvds->dual_link)
+ ret = rcar_lvds_parse_dt_companion(lvds);
+
done:
of_node_put(local_output);
of_node_put(remote_input);
@@ -793,7 +858,6 @@ static int rcar_lvds_probe(struct platform_device *pdev)
lvds->dev = &pdev->dev;
lvds->info = of_device_get_match_data(&pdev->dev);
- lvds->enabled = false;
ret = rcar_lvds_parse_dt(lvds);
if (ret < 0)
@@ -866,6 +930,7 @@ static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = {
static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
+ { .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
{ .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
{ .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.h b/drivers/gpu/drm/rcar-du/rcar_lvds.h
index a709cae1bc32..222ec0e60785 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.h
@@ -15,6 +15,7 @@ struct drm_bridge;
#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq);
void rcar_lvds_clk_disable(struct drm_bridge *bridge);
+bool rcar_lvds_dual_link(struct drm_bridge *bridge);
#else
static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
unsigned long freq)
@@ -22,6 +23,10 @@ static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
return -ENOSYS;
}
static inline void rcar_lvds_clk_disable(struct drm_bridge *bridge) { }
+static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
+{
+ return false;
+}
#endif /* CONFIG_DRM_RCAR_LVDS */
#endif /* __RCAR_LVDS_H__ */
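The header keeps the LVDS driver optional: every exported function is paired with a static inline stub so callers compile unchanged when CONFIG_DRM_RCAR_LVDS is disabled. A generic sketch of that pattern, with the EXAMPLE_* and example_* names as placeholders:

	#include <linux/kconfig.h>

	struct example_dev;

	#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
	bool example_feature_active(struct example_dev *edev);
	#else
	static inline bool example_feature_active(struct example_dev *edev)
	{
		return false;	/* feature compiled out */
	}
	#endif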
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 77d929e6f6ce..40f3a4c53848 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -538,11 +538,25 @@ static int dw_hdmi_rockchip_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused dw_hdmi_rockchip_resume(struct device *dev)
+{
+ struct rockchip_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_resume(hdmi->hdmi);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_hdmi_rockchip_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_rockchip_resume)
+};
+
struct platform_driver dw_hdmi_rockchip_pltfm_driver = {
.probe = dw_hdmi_rockchip_probe,
.remove = dw_hdmi_rockchip_remove,
.driver = {
.name = "dwhdmi-rockchip",
+ .pm = &dw_hdmi_rockchip_pm,
.of_match_table = dw_hdmi_rockchip_dt_ids,
},
};
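The new PM ops wire up a resume-only sleep handler. SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) expands to nothing when CONFIG_PM_SLEEP is off, which is why the callback is marked __maybe_unused. A minimal sketch of the same wiring (example_* names are placeholders):

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Re-program hardware state lost over a system suspend. */
	static int __maybe_unused example_resume(struct device *dev)
	{
		return 0;
	}

	static const struct dev_pm_ops example_pm = {
		SET_SYSTEM_SLEEP_PM_OPS(NULL, example_resume)
	};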
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 1c69066b6894..64ca87cf6d50 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -8,6 +8,7 @@
#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
@@ -17,20 +18,10 @@
#include "rockchip_drm_gem.h"
#include "rockchip_drm_psr.h"
-static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned int flags, unsigned int color,
- struct drm_clip_rect *clips,
- unsigned int num_clips)
-{
- rockchip_drm_psr_flush_all(fb->dev);
- return 0;
-}
-
static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
- .dirty = rockchip_drm_fb_dirty,
+ .dirty = drm_atomic_helper_dirtyfb,
};
static struct drm_framebuffer *
@@ -66,23 +57,18 @@ static struct drm_framebuffer *
rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info = drm_get_format_info(dev,
+ mode_cmd);
struct drm_framebuffer *fb;
struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
struct drm_gem_object *obj;
- unsigned int hsub;
- unsigned int vsub;
- int num_planes;
+ int num_planes = min_t(int, info->num_planes, ROCKCHIP_MAX_FB_BUFFER);
int ret;
int i;
- hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
- num_planes = min(drm_format_num_planes(mode_cmd->pixel_format),
- ROCKCHIP_MAX_FB_BUFFER);
-
for (i = 0; i < num_planes; i++) {
- unsigned int width = mode_cmd->width / (i ? hsub : 1);
- unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
unsigned int min_size;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
@@ -95,7 +81,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
min_size = (height - 1) * mode_cmd->pitches[i] +
mode_cmd->offsets[i] +
- width * drm_format_plane_cpp(mode_cmd->pixel_format, i);
+ width * info->cpp[i];
if (obj->size < min_size) {
drm_gem_object_put_unlocked(obj);
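The rewritten loop derives the minimum BO size per plane from the drm_format_info fields: plane 0 is never chroma-subsampled, while higher planes divide the dimensions by hsub/vsub. A sketch of that computation (the function name is illustrative):

	#include <drm/drm_fourcc.h>

	/* Smallest buffer that can back plane i of a framebuffer request. */
	static unsigned int example_min_size(const struct drm_format_info *info,
					     const struct drm_mode_fb_cmd2 *cmd,
					     int i)
	{
		unsigned int width = cmd->width / (i ? info->hsub : 1);
		unsigned int height = cmd->height / (i ? info->vsub : 1);

		return (height - 1) * cmd->pitches[i] +
		       cmd->offsets[i] + width * info->cpp[i];
	}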
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 12ed5265a90b..e4580d8f21e1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -307,24 +307,19 @@ static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
uint32_t src_w, uint32_t src_h, uint32_t dst_w,
- uint32_t dst_h, uint32_t pixel_format)
+ uint32_t dst_h, const struct drm_format_info *info)
{
uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
uint16_t cbcr_hor_scl_mode = SCALE_NONE;
uint16_t cbcr_ver_scl_mode = SCALE_NONE;
- int hsub = drm_format_horz_chroma_subsampling(pixel_format);
- int vsub = drm_format_vert_chroma_subsampling(pixel_format);
- const struct drm_format_info *info;
bool is_yuv = false;
- uint16_t cbcr_src_w = src_w / hsub;
- uint16_t cbcr_src_h = src_h / vsub;
+ uint16_t cbcr_src_w = src_w / info->hsub;
+ uint16_t cbcr_src_h = src_h / info->vsub;
uint16_t vsu_mode;
uint16_t lb_mode;
uint32_t val;
int vskiplines;
- info = drm_format_info(pixel_format);
-
if (info->is_yuv)
is_yuv = true;
@@ -823,8 +818,8 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
(state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0);
if (is_yuv) {
- int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
- int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
+ int hsub = fb->format->hsub;
+ int vsub = fb->format->vsub;
int bpp = fb->format->cpp[1];
uv_obj = fb->obj[1];
@@ -848,7 +843,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
if (win->phy->scl)
scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
drm_rect_width(dest), drm_rect_height(dest),
- fb->format->format);
+ fb->format);
VOP_WIN_SET(vop, win, act_info, act_info);
VOP_WIN_SET(vop, win, dsp_info, dsp_info);
@@ -1215,17 +1210,6 @@ static void vop_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
}
-static void vop_crtc_reset(struct drm_crtc *crtc)
-{
- if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
- kfree(crtc->state);
-
- crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
- if (crtc->state)
- crtc->state->crtc = crtc;
-}
-
static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct rockchip_crtc_state *rockchip_state;
@@ -1247,6 +1231,17 @@ static void vop_crtc_destroy_state(struct drm_crtc *crtc,
kfree(s);
}
+static void vop_crtc_reset(struct drm_crtc *crtc)
+{
+ struct rockchip_crtc_state *crtc_state =
+ kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+ if (crtc->state)
+ vop_crtc_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
+}
+
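The new vop_crtc_reset() releases any existing state through the driver's own destroy_state path (the previous version kfree()'d only the base struct, leaking subclass resources) and hands the freshly allocated subclass to __drm_atomic_helper_crtc_reset(), which links it to the CRTC. A generic sketch of the pattern, with my_* names as placeholders and assuming the helper tolerates a NULL state on allocation failure:

	#include <linux/slab.h>
	#include <drm/drm_atomic_state_helper.h>
	#include <drm/drm_crtc.h>

	struct my_crtc_state {
		struct drm_crtc_state base;
		/* driver-private members ... */
	};

	static void my_crtc_destroy_state(struct drm_crtc *crtc,
					  struct drm_crtc_state *state);

	static void my_crtc_reset(struct drm_crtc *crtc)
	{
		struct my_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

		if (crtc->state)
			my_crtc_destroy_state(crtc, crtc->state);

		/* Links state->base to crtc and installs it as crtc->state. */
		__drm_atomic_helper_crtc_reset(crtc, state ? &state->base : NULL);
	}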
#ifdef CONFIG_DRM_ANALOGIX_DP
static struct drm_connector *vop_get_edp_connector(struct vop *vop)
{
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 35dc74883f83..6889d6534eba 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -22,8 +22,17 @@
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/savage_drm.h>
+
#include "savage_drv.h"
/* Need a long timeout for shadow status updates can take a while
@@ -53,7 +62,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
status = dev_priv->status_ptr[0];
if ((status & mask) < threshold)
return 0;
- DRM_UDELAY(1);
+ udelay(1);
}
#if SAVAGE_BCI_DEBUG
@@ -74,7 +83,7 @@ savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
return 0;
- DRM_UDELAY(1);
+ udelay(1);
}
#if SAVAGE_BCI_DEBUG
@@ -95,7 +104,7 @@ savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
return 0;
- DRM_UDELAY(1);
+ udelay(1);
}
#if SAVAGE_BCI_DEBUG
@@ -128,7 +137,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
(status & 0xffff) == 0)
return 0;
- DRM_UDELAY(1);
+ udelay(1);
}
#if SAVAGE_BCI_DEBUG
@@ -150,7 +159,7 @@ savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
(status & 0xffff) == 0)
return 0;
- DRM_UDELAY(1);
+ udelay(1);
}
#if SAVAGE_BCI_DEBUG
@@ -1014,7 +1023,7 @@ int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file
*/
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- DRM_CURRENTPID, d->send_count);
+ task_pid_nr(current), d->send_count);
return -EINVAL;
}
@@ -1022,7 +1031,7 @@ int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file
*/
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- DRM_CURRENTPID, d->request_count, dma->buf_count);
+ task_pid_nr(current), d->request_count, dma->buf_count);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 2bddeb8bf457..2966fcfd9548 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -25,12 +25,13 @@
#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/savage_drm.h>
-#include "savage_drv.h"
-
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
+#include "savage_drv.h"
+
static struct pci_device_id pciidlist[] = {
savage_PCI_IDS
};
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index 44a1009b6ecb..b0081bb64776 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -26,7 +26,11 @@
#ifndef __SAVAGE_DRV_H__
#define __SAVAGE_DRV_H__
+#include <linux/io.h>
+
+#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
+#include <drm/savage_drm.h>
#define DRIVER_AUTHOR "Felix Kuehling"
@@ -484,8 +488,10 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
/*
* access to MMIO
*/
-#define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
-#define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) )
+#define SAVAGE_READ(reg) \
+ readl(((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define SAVAGE_WRITE(reg) \
+ writel(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
/*
* access to the burst command interface (BCI)
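With drmP.h gone, the savage accessors expand directly to readl()/writel() on the ioremapped MMIO handle. A self-contained sketch of 32-bit MMIO accessors in that style (example_* names and the regs base are placeholders):

	#include <linux/io.h>
	#include <linux/types.h>

	static u32 example_read(void __iomem *regs, unsigned int reg)
	{
		return readl(regs + reg);
	}

	static void example_write(void __iomem *regs, unsigned int reg, u32 val)
	{
		writel(val, regs + reg);
	}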
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index ebb8b7d32b33..a2ac25c11c90 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -22,8 +22,15 @@
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/savage_drm.h>
+
#include "savage_drv.h"
void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index a1bec2779e76..c1058eece16b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -265,32 +265,6 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
-/* job_finish is called after hw fence signaled
- */
-static void drm_sched_job_finish(struct work_struct *work)
-{
- struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
- finish_work);
- struct drm_gpu_scheduler *sched = s_job->sched;
- unsigned long flags;
-
- /*
- * Canceling the timeout without removing our job from the ring mirror
- * list is safe, as we will only end up in this worker if our jobs
- * finished fence has been signaled. So even if some another worker
- * manages to find this job as the next job in the list, the fence
- * signaled check below will prevent the timeout to be restarted.
- */
- cancel_delayed_work_sync(&sched->work_tdr);
-
- spin_lock_irqsave(&sched->job_list_lock, flags);
- /* queue TDR for next job */
- drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- sched->ops->free_job(s_job);
-}
-
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
struct drm_gpu_scheduler *sched = s_job->sched;
@@ -312,9 +286,19 @@ static void drm_sched_job_timedout(struct work_struct *work)
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
- if (job)
+ if (job) {
job->sched->ops->timedout_job(job);
+ /*
+ * The guilty job did complete and hence needs to be removed
+ * manually; see the drm_sched_stop() documentation.
+ */
+ if (sched->free_guilty) {
+ job->sched->ops->free_job(job);
+ sched->free_guilty = false;
+ }
+ }
+
spin_lock_irqsave(&sched->job_list_lock, flags);
drm_sched_start_timeout(sched);
spin_unlock_irqrestore(&sched->job_list_lock, flags);
@@ -369,41 +353,68 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
* drm_sched_stop - stop the scheduler
*
* @sched: scheduler instance
+ * @bad: job which caused the time out
+ *
+ * Stop the scheduler and also remove and free all completed jobs.
+ * Note: the bad job will not be freed as it might be used later, so it is
+ * the caller's responsibility to release it manually if it is no longer
+ * part of the mirror list.
*
*/
-void drm_sched_stop(struct drm_gpu_scheduler *sched)
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
- struct drm_sched_job *s_job;
+ struct drm_sched_job *s_job, *tmp;
unsigned long flags;
- struct dma_fence *last_fence = NULL;
kthread_park(sched->thread);
/*
- * Verify all the signaled jobs in mirror list are removed from the ring
- * by waiting for the latest job to enter the list. This should insure that
- * also all the previous jobs that were in flight also already singaled
- * and removed from the list.
+ * Iterate the job list from the latest to the earliest entry and either
+ * deactivate each job's HW callback or remove the job from the mirror
+ * list if it has already signaled.
+ * This iteration is thread safe as the sched thread is stopped.
*/
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
&s_job->cb)) {
- dma_fence_put(s_job->s_fence->parent);
- s_job->s_fence->parent = NULL;
atomic_dec(&sched->hw_rq_count);
} else {
- last_fence = dma_fence_get(&s_job->s_fence->finished);
- break;
+ /*
+ * Remove the job from ring_mirror_list. Locking here guards
+ * against a concurrent drm_sched_resume_timeout().
+ */
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_del_init(&s_job->node);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+ /*
+ * Wait for the job's HW fence callback to finish using s_job
+ * before releasing it.
+ *
+ * The job is still alive here, so the fence refcount is at least 1.
+ */
+ dma_fence_wait(&s_job->s_fence->finished, false);
+
+ /*
+ * We must keep the bad job alive for later use during
+ * recovery by some of the drivers, but leave a hint
+ * that the guilty job must be released.
+ */
+ if (bad != s_job)
+ sched->ops->free_job(s_job);
+ else
+ sched->free_guilty = true;
}
}
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
- if (last_fence) {
- dma_fence_wait(last_fence, false);
- dma_fence_put(last_fence);
- }
+ /*
+ * Stop any pending timer in flight, as we rearm it in drm_sched_start().
+ * This prevents in-progress timeout work from firing right away after
+ * this TDR has finished and before the newly restarted jobs have had a
+ * chance to complete.
+ */
+ cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);
@@ -412,26 +423,28 @@ EXPORT_SYMBOL(drm_sched_stop);
* drm_sched_job_recovery - recover jobs after a reset
*
* @sched: scheduler instance
+ * @full_recovery: proceed with a complete scheduler restart
*
*/
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
struct drm_sched_job *s_job, *tmp;
+ unsigned long flags;
int r;
- if (!full_recovery)
- goto unpark;
-
/*
* Locking the list is not required here as the sched thread is parked
- * so no new jobs are being pushed in to HW and in drm_sched_stop we
- * flushed all the jobs who were still in mirror list but who already
- * signaled and removed them self from the list. Also concurrent
+ * so no new jobs are being inserted or removed. Also concurrent
* GPU recovers can't run in parallel.
*/
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct dma_fence *fence = s_job->s_fence->parent;
+ atomic_inc(&sched->hw_rq_count);
+
+ if (!full_recovery)
+ continue;
+
if (fence) {
r = dma_fence_add_callback(fence, &s_job->cb,
drm_sched_process_job);
@@ -444,9 +457,12 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
drm_sched_process_job(NULL, &s_job->cb);
}
- drm_sched_start_timeout(sched);
+ if (full_recovery) {
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ drm_sched_start_timeout(sched);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ }
-unpark:
kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);
@@ -463,7 +479,6 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
uint64_t guilty_context;
bool found_guilty = false;
- /*TODO DO we need spinlock here ? */
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct drm_sched_fence *s_fence = s_job->s_fence;
@@ -475,8 +490,8 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
+ dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = sched->ops->run_job(s_job);
- atomic_inc(&sched->hw_rq_count);
}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
@@ -513,7 +528,6 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
- INIT_WORK(&job->finish_work, drm_sched_job_finish);
INIT_LIST_HEAD(&job->node);
return 0;
@@ -596,24 +610,54 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
struct drm_sched_fence *s_fence = s_job->s_fence;
struct drm_gpu_scheduler *sched = s_fence->sched;
- unsigned long flags;
-
- cancel_delayed_work(&sched->work_tdr);
atomic_dec(&sched->hw_rq_count);
atomic_dec(&sched->num_jobs);
- spin_lock_irqsave(&sched->job_list_lock, flags);
- /* remove job from ring_mirror_list */
- list_del_init(&s_job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ trace_drm_sched_process_job(s_fence);
drm_sched_fence_finished(s_fence);
-
- trace_drm_sched_process_job(s_fence);
wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * drm_sched_cleanup_jobs - destroy finished jobs
+ *
+ * @sched: scheduler instance
+ *
+ * Remove all finished jobs from the mirror list and destroy them.
+ */
+static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
+{
+ unsigned long flags;
+
+ /* Don't destroy jobs while the timeout worker is running */
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !cancel_delayed_work(&sched->work_tdr))
+ return;
+
+ while (!list_empty(&sched->ring_mirror_list)) {
+ struct drm_sched_job *job;
+
+ job = list_first_entry(&sched->ring_mirror_list,
+ struct drm_sched_job, node);
+ if (!dma_fence_is_signaled(&job->s_fence->finished))
+ break;
+
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ /* remove job from ring_mirror_list */
+ list_del_init(&job->node);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+ sched->ops->free_job(job);
+ }
+
+ /* queue timeout for next job */
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ drm_sched_start_timeout(sched);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
- schedule_work(&s_job->finish_work);
}
/**
@@ -655,9 +699,10 @@ static int drm_sched_main(void *param)
struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
+ (drm_sched_cleanup_jobs(sched),
(!drm_sched_blocked(sched) &&
(entity = drm_sched_select_entity(sched))) ||
- kthread_should_stop());
+ kthread_should_stop()));
if (!entity)
continue;
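
/*
 * A hedged sketch of how a driver is expected to chain the reworked
 * helpers above in its timedout_job callback. demo_gpu_reset() is an
 * assumed driver-specific hook, not scheduler API.
 */
#include <drm/gpu_scheduler.h>

static void demo_gpu_reset(struct drm_gpu_scheduler *sched); /* assumed */

static void demo_timedout_job(struct drm_sched_job *bad)
{
	struct drm_gpu_scheduler *sched = bad->sched;

	/* Park the thread and free finished jobs; @bad stays alive */
	drm_sched_stop(sched, bad);

	demo_gpu_reset(sched);

	/* Re-run pending jobs; the guilty context gets -ECANCELED */
	drm_sched_resubmit_jobs(sched);

	/* Re-arm the timeout and unpark the scheduler thread */
	drm_sched_start(sched, true);
}
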
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index e04a92658cd7..ee3801201ecc 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -27,11 +27,13 @@
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_pciids.h>
#include <drm/sis_drm.h>
-#include "sis_drv.h"
-#include <drm/drm_pciids.h>
+#include "sis_drv.h"
static struct pci_device_id pciidlist[] = {
sisdrv_PCI_IDS
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 328f8a750976..81339443b3b1 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -28,7 +28,9 @@
#ifndef _SIS_DRV_H_
#define _SIS_DRV_H_
+#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
+#include <drm/drm_mm.h>
/* General customization:
*/
@@ -46,12 +48,8 @@ enum sis_family {
SIS_CHIP_315 = 1,
};
-#include <drm/drm_mm.h>
-
-
-#define SIS_BASE (dev_priv->mmio)
-#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg)
-#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val)
+#define SIS_READ(reg) readl(((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define SIS_WRITE(reg, val) writel(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
typedef struct drm_sis_private {
drm_local_map_t *mmio;
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 1622db24cd39..e51d4289a3d0 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -31,11 +31,14 @@
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include <drm/drmP.h>
+#include <video/sisfb.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
#include <drm/sis_drm.h>
+
#include "sis_drv.h"
-#include <video/sisfb.h>
#define VIDEO_TYPE 0
#define AGP_TYPE 1
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c
index 7c5a7830b6e8..5ff87a4a1c4c 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.c
+++ b/drivers/gpu/drm/sti/sti_awg_utils.c
@@ -4,6 +4,8 @@
* Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
*/
+#include <drm/drm_print.h>
+
#include "sti_awg_utils.h"
#define AWG_DELAY (-5)
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.h b/drivers/gpu/drm/sti/sti_awg_utils.h
index 258a568f050b..8ddfdc049b10 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.h
+++ b/drivers/gpu/drm/sti/sti_awg_utils.h
@@ -7,7 +7,7 @@
#ifndef _STI_AWG_UTILS_H_
#define _STI_AWG_UTILS_H_
-#include <drm/drmP.h>
+#include <linux/types.h>
#define AWG_MAX_INST 64
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 021b8fcaa0b9..c7652584255d 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -7,11 +7,14 @@
*/
#include <linux/component.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "sti_compositor.h"
#include "sti_crtc.h"
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 387f0bed6c1c..dc64fbfc4e61 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -8,11 +8,13 @@
#include <linux/clk.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "sti_compositor.h"
#include "sti_crtc.h"
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
index d87c488212d6..df489ab14e2b 100644
--- a/drivers/gpu/drm/sti/sti_crtc.h
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -7,8 +7,10 @@
#ifndef _STI_CRTC_H_
#define _STI_CRTC_H_
-#include <drm/drmP.h>
-
+struct drm_crtc;
+struct drm_device;
+struct drm_plane;
+struct notifier_block;
struct sti_mixer;
int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
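
/*
 * All of the sti header changes follow the same pattern sketched below:
 * instead of pulling in the monolithic drmP.h, a header forward-declares
 * the structs it only uses by pointer and includes nothing extra. The
 * demo_* names are illustrative.
 */
#ifndef _DEMO_CRTC_H_
#define _DEMO_CRTC_H_

struct drm_crtc;	/* used by pointer only, no include required */
struct drm_device;

int demo_crtc_init(struct drm_device *drm_dev, struct drm_crtc *crtc);

#endif /* _DEMO_CRTC_H_ */
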
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index bc908453ffb3..0bf7c332cf0b 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -6,9 +6,11 @@
* for STMicroelectronics.
*/
+#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
index 067feda5226c..25ebeb3f6bbc 100644
--- a/drivers/gpu/drm/sti/sti_cursor.h
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -7,6 +7,9 @@
#ifndef _STI_CURSOR_H_
#define _STI_CURSOR_H_
+struct drm_device;
+struct device;
+
struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index a525fd899f68..bb6ae6dd66c9 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -4,25 +4,26 @@
* Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
*/
-#include <drm/drmP.h>
-
#include <linux/component.h>
-#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include "sti_crtc.h"
#include "sti_drv.h"
+#include "sti_drv.h"
#include "sti_plane.h"
#define DRIVER_NAME "sti"
@@ -95,7 +96,6 @@ static struct drm_info_list sti_drm_dbg_list[] = {
static int sti_drm_dbg_init(struct drm_minor *minor)
{
- struct dentry *dentry;
int ret;
ret = drm_debugfs_create_files(sti_drm_dbg_list,
@@ -104,13 +104,8 @@ static int sti_drm_dbg_init(struct drm_minor *minor)
if (ret)
goto err;
- dentry = debugfs_create_file("fps_show", S_IRUGO | S_IWUSR,
- minor->debugfs_root, minor->dev,
- &sti_drm_fps_fops);
- if (!dentry) {
- ret = -ENOMEM;
- goto err;
- }
+ debugfs_create_file("fps_show", S_IRUGO | S_IWUSR, minor->debugfs_root,
+ minor->dev, &sti_drm_fps_fops);
DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
return 0;
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index 4b41142a22e4..b5b2dd560bae 100644
--- a/drivers/gpu/drm/sti/sti_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -7,10 +7,11 @@
#ifndef _STI_DRV_H_
#define _STI_DRV_H_
-#include <drm/drmP.h>
+#include <linux/platform_device.h>
+struct drm_device;
+struct drm_property;
struct sti_compositor;
-struct sti_tvout;
/**
* STI drm private structure
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index b31cc2672d36..9e6d5d8b7030 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -11,9 +11,10 @@
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sti_awg_utils.h"
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index cff7b2b5ee9e..8e926cd6a1c8 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -5,10 +5,14 @@
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
*/
+
+#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_compositor.h"
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
index d3e8ebfe2e66..deb07e34173d 100644
--- a/drivers/gpu/drm/sti/sti_gdp.h
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -11,6 +11,11 @@
#include <linux/types.h>
+#include <drm/drm_plane.h>
+
+struct drm_device;
+struct device;
+
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index ff9256673fc8..94e404f13234 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -6,12 +6,16 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
/* HDformatter registers */
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 6000df624980..f03d617edc4c 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -13,9 +13,12 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <sound/hdmi-codec.h>
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 63a24941db3b..1f6dc90b5d83 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -10,9 +10,11 @@
#include <linux/hdmi.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
#include <media/cec-notifier.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_property.h>
+
#define HDMI_STA 0x0010
#define HDMI_STA_DLL_LCK BIT(5)
#define HDMI_STA_HOT_PLUG BIT(4)
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
index 01699af6a768..d5f94dca0d32 100644
--- a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
@@ -4,6 +4,8 @@
* Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
*/
+#include <drm/drm_print.h>
+
#include "sti_hdmi_tx3g4c28phy.h"
#define HDMI_SRZ_CFG 0x504
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 23565f52dd71..1015abe0ce08 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -5,19 +5,25 @@
*/
#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_compositor.h"
+#include "sti_drv.h"
#include "sti_hqvdp_lut.h"
#include "sti_plane.h"
#include "sti_vtg.h"
-#include "sti_drv.h"
/* Firmware name */
#define HQVDP_FMW_NAME "hqvdp-stih407.bin"
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index a4f45c74d678..c3a3e1e5fc8a 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -5,8 +5,12 @@
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
*/
+
+#include <linux/moduleparam.h>
#include <linux/seq_file.h>
+#include <drm/drm_print.h>
+
#include "sti_compositor.h"
#include "sti_mixer.h"
#include "sti_vtg.h"
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 4cb3cfddc03a..d9544246913a 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -9,10 +9,15 @@
#ifndef _STI_MIXER_H_
#define _STI_MIXER_H_
-#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
#include "sti_plane.h"
+struct device;
+
#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
enum sti_mixer_status {
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index b48cd86e0250..3da4a46df2f2 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -6,8 +6,10 @@
* for STMicroelectronics.
*/
-#include <drm/drmP.h>
+#include <linux/types.h>
+
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_compositor.h"
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index b8d7fae2a014..065ffffbfb4a 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -7,7 +7,6 @@
#ifndef _STI_PLANE_H_
#define _STI_PLANE_H_
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index c42f2fa7053c..e1b3c8cb7287 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -8,14 +8,18 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/seq_file.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "sti_crtc.h"
#include "sti_drv.h"
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 2aac36c95835..2d4230410464 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -5,7 +5,9 @@
*/
#include <linux/seq_file.h>
-#include <drm/drmP.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "sti_plane.h"
#include "sti_vid.h"
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 6c421644de18..ef4009f11396 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -8,11 +8,13 @@
*/
#include <linux/module.h>
+#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_print.h>
#include "sti_drv.h"
#include "sti_vtg.h"
diff --git a/drivers/gpu/drm/sti/sti_vtg.h b/drivers/gpu/drm/sti/sti_vtg.h
index d177129e5bcb..46faf141b2d9 100644
--- a/drivers/gpu/drm/sti/sti_vtg.h
+++ b/drivers/gpu/drm/sti/sti_vtg.h
@@ -16,6 +16,7 @@
#define VTG_SYNC_ID_DVO 4
struct sti_vtg;
+struct drm_crtc;
struct drm_display_mode;
struct notifier_block;
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 5834ef56fbaa..5659572151a8 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -12,6 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -135,14 +136,15 @@ static __maybe_unused int drv_suspend(struct device *dev)
struct ltdc_device *ldev = ddev->dev_private;
struct drm_atomic_state *state;
- drm_kms_helper_poll_disable(ddev);
+ if (WARN_ON(!ldev->suspend_state))
+ return -ENOENT;
+
state = drm_atomic_helper_suspend(ddev);
- if (IS_ERR(state)) {
- drm_kms_helper_poll_enable(ddev);
+ if (IS_ERR(state))
return PTR_ERR(state);
- }
+
ldev->suspend_state = state;
- ltdc_suspend(ddev);
+ pm_runtime_force_suspend(dev);
return 0;
}
@@ -151,16 +153,41 @@ static __maybe_unused int drv_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct ltdc_device *ldev = ddev->dev_private;
+ int ret;
- ltdc_resume(ddev);
- drm_atomic_helper_resume(ddev, ldev->suspend_state);
- drm_kms_helper_poll_enable(ddev);
+ pm_runtime_force_resume(dev);
+ ret = drm_atomic_helper_resume(ddev, ldev->suspend_state);
+ if (ret) {
+ pm_runtime_force_suspend(dev);
+ ldev->suspend_state = NULL;
+ return ret;
+ }
return 0;
}
+static __maybe_unused int drv_runtime_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("\n");
+ ltdc_suspend(ddev);
+
+ return 0;
+}
+
+static __maybe_unused int drv_runtime_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("\n");
+ return ltdc_resume(ddev);
+}
+
static const struct dev_pm_ops drv_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(drv_suspend, drv_resume)
+ SET_RUNTIME_PM_OPS(drv_runtime_suspend,
+ drv_runtime_resume, NULL)
};
static int stm_drm_platform_probe(struct platform_device *pdev)
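
/*
 * Sketch of the runtime-PM split introduced above: the system sleep
 * hooks delegate to the runtime-PM callbacks via pm_runtime_force_*(),
 * so the hardware on/off sequence lives in one place. All demo_* names
 * are illustrative assumptions.
 */
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int demo_runtime_suspend(struct device *dev)
{
	/* power down the controller hardware here */
	return 0;
}

static int demo_runtime_resume(struct device *dev)
{
	/* power the controller hardware back up here */
	return 0;
}

static int demo_suspend(struct device *dev)
{
	/* a real driver saves atomic state first, then reuses the runtime path */
	return pm_runtime_force_suspend(dev);
}

static int demo_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops demo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};
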
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 1bef73e8c8fe..0ab32fee6c1b 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <drm/drmP.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/bridge/dw_mipi_dsi.h>
@@ -76,6 +77,7 @@ struct dw_mipi_dsi_stm {
u32 hw_version;
int lane_min_kbps;
int lane_max_kbps;
+ struct regulator *vdd_supply;
};
static inline void dsi_write(struct dw_mipi_dsi_stm *dsi, u32 reg, u32 val)
@@ -208,10 +210,27 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
if (ret)
DRM_DEBUG_DRIVER("!TIMEOUT! waiting PLL, let's continue\n");
+ return 0;
+}
+
+static void dw_mipi_dsi_phy_power_on(void *priv_data)
+{
+ struct dw_mipi_dsi_stm *dsi = priv_data;
+
+ DRM_DEBUG_DRIVER("\n");
+
/* Enable the DSI wrapper */
dsi_set(dsi, DSI_WCR, WCR_DSIEN);
+}
- return 0;
+static void dw_mipi_dsi_phy_power_off(void *priv_data)
+{
+ struct dw_mipi_dsi_stm *dsi = priv_data;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Disable the DSI wrapper */
+ dsi_clear(dsi, DSI_WCR, WCR_DSIEN);
}
static int
@@ -225,7 +244,6 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
u32 val;
/* Update lane capabilities according to hw version */
- dsi->hw_version = dsi_read(dsi, DSI_VERSION) & VERSION;
dsi->lane_min_kbps = LANE_MIN_KBPS;
dsi->lane_max_kbps = LANE_MAX_KBPS;
if (dsi->hw_version == HWVER_131) {
@@ -286,6 +304,8 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_stm_phy_ops = {
.init = dw_mipi_dsi_phy_init,
+ .power_on = dw_mipi_dsi_phy_power_on,
+ .power_off = dw_mipi_dsi_phy_power_off,
.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
};
@@ -304,6 +324,7 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_mipi_dsi_stm *dsi;
+ struct clk *pclk;
struct resource *res;
int ret;
@@ -314,21 +335,58 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->base)) {
- DRM_ERROR("Unable to get dsi registers\n");
- return PTR_ERR(dsi->base);
+ ret = PTR_ERR(dsi->base);
+ DRM_ERROR("Unable to get dsi registers %d\n", ret);
+ return ret;
+ }
+
+ dsi->vdd_supply = devm_regulator_get(dev, "phy-dsi");
+ if (IS_ERR(dsi->vdd_supply)) {
+ ret = PTR_ERR(dsi->vdd_supply);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Failed to request regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(dsi->vdd_supply);
+ if (ret) {
+ DRM_ERROR("Failed to enable regulator: %d\n", ret);
+ return ret;
}
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
ret = PTR_ERR(dsi->pllref_clk);
- dev_err(dev, "Unable to get pll reference clock: %d\n", ret);
- return ret;
+ DRM_ERROR("Unable to get pll reference clock: %d\n", ret);
+ goto err_clk_get;
}
ret = clk_prepare_enable(dsi->pllref_clk);
if (ret) {
- dev_err(dev, "%s: Failed to enable pllref_clk\n", __func__);
- return ret;
+ DRM_ERROR("Failed to enable pllref_clk: %d\n", ret);
+ goto err_clk_get;
+ }
+
+ pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(pclk)) {
+ ret = PTR_ERR(pclk);
+ DRM_ERROR("Unable to get peripheral clock: %d\n", ret);
+ goto err_dsi_probe;
+ }
+
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
+ DRM_ERROR("%s: Failed to enable peripheral clk\n", __func__);
+ goto err_dsi_probe;
+ }
+
+ dsi->hw_version = dsi_read(dsi, DSI_VERSION) & VERSION;
+ clk_disable_unprepare(pclk);
+
+ if (dsi->hw_version != HWVER_130 && dsi->hw_version != HWVER_131) {
+ ret = -ENODEV;
+ DRM_ERROR("bad dsi hardware version\n");
+ goto err_dsi_probe;
}
dw_mipi_dsi_stm_plat_data.base = dsi->base;
@@ -338,20 +396,28 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
dsi->dsi = dw_mipi_dsi_probe(pdev, &dw_mipi_dsi_stm_plat_data);
if (IS_ERR(dsi->dsi)) {
- DRM_ERROR("Failed to initialize mipi dsi host\n");
- clk_disable_unprepare(dsi->pllref_clk);
- return PTR_ERR(dsi->dsi);
+ ret = PTR_ERR(dsi->dsi);
+ DRM_ERROR("Failed to initialize mipi dsi host: %d\n", ret);
+ goto err_dsi_probe;
}
return 0;
+
+err_dsi_probe:
+ clk_disable_unprepare(dsi->pllref_clk);
+err_clk_get:
+ regulator_disable(dsi->vdd_supply);
+
+ return ret;
}
static int dw_mipi_dsi_stm_remove(struct platform_device *pdev)
{
struct dw_mipi_dsi_stm *dsi = platform_get_drvdata(pdev);
- clk_disable_unprepare(dsi->pllref_clk);
dw_mipi_dsi_remove(dsi->dsi);
+ clk_disable_unprepare(dsi->pllref_clk);
+ regulator_disable(dsi->vdd_supply);
return 0;
}
@@ -363,6 +429,7 @@ static int __maybe_unused dw_mipi_dsi_stm_suspend(struct device *dev)
DRM_DEBUG_DRIVER("\n");
clk_disable_unprepare(dsi->pllref_clk);
+ regulator_disable(dsi->vdd_supply);
return 0;
}
@@ -370,10 +437,22 @@ static int __maybe_unused dw_mipi_dsi_stm_suspend(struct device *dev)
static int __maybe_unused dw_mipi_dsi_stm_resume(struct device *dev)
{
struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
+ int ret;
DRM_DEBUG_DRIVER("\n");
- clk_prepare_enable(dsi->pllref_clk);
+ ret = regulator_enable(dsi->vdd_supply);
+ if (ret) {
+ DRM_ERROR("Failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dsi->pllref_clk);
+ if (ret) {
+ regulator_disable(dsi->vdd_supply);
+ DRM_ERROR("Failed to enable pllref_clk: %d\n", ret);
+ return ret;
+ }
return 0;
}
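
/*
 * The probe rework above follows the common regulator pattern: request
 * the supply, stay quiet on -EPROBE_DEFER, enable it, and make every
 * later error path unwind it. A minimal sketch with an assumed "vdd"
 * supply name:
 */
#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int demo_get_vdd(struct device *dev, struct regulator **out)
{
	struct regulator *vdd = devm_regulator_get(dev, "vdd");
	int ret;

	if (IS_ERR(vdd)) {
		ret = PTR_ERR(vdd);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request regulator: %d\n", ret);
		return ret;
	}

	ret = regulator_enable(vdd);
	if (ret)
		return ret;

	*out = vdd;
	return 0;	/* caller must regulator_disable() on later failures */
}
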
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 32fd6a3b37fb..2fe6c4a8d915 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -16,6 +16,7 @@
#include <linux/of_address.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <drm/drm_atomic.h>
@@ -232,6 +233,11 @@ static const enum ltdc_pix_fmt ltdc_pix_fmt_a1[NB_PF] = {
PF_ARGB4444 /* 0x07 */
};
+static const u64 ltdc_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static inline u32 reg_read(void __iomem *base, u32 reg)
{
return readl_relaxed(base + reg);
@@ -426,8 +432,8 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
/* Enable IRQ */
reg_set(ldev->regs, LTDC_IER, IER_RRIE | IER_FUIE | IER_TERRIE);
- /* Immediately commit the planes */
- reg_set(ldev->regs, LTDC_SRCR, SRCR_IMR);
+ /* Commit shadow registers = update planes at next vblank */
+ reg_set(ldev->regs, LTDC_SRCR, SRCR_VBR);
/* Enable LTDC */
reg_set(ldev->regs, LTDC_GCR, GCR_LTDCEN);
@@ -439,6 +445,7 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ struct drm_device *ddev = crtc->dev;
DRM_DEBUG_DRIVER("\n");
@@ -452,6 +459,8 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
/* immediately commit disable of layers before switching off LTDC */
reg_set(ldev->regs, LTDC_SRCR, SRCR_IMR);
+
+ pm_runtime_put_sync(ddev->dev);
}
#define CLK_TOLERANCE_HZ 50
@@ -500,33 +509,55 @@ static bool ltdc_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ struct drm_device *ddev = crtc->dev;
int rate = mode->clock * 1000;
+ bool runtime_active;
+ int ret;
- /*
- * TODO clk_round_rate() does not work yet. When ready, it can
- * be used instead of clk_set_rate() then clk_get_rate().
- */
+ runtime_active = pm_runtime_active(ddev->dev);
+
+ if (runtime_active)
+ pm_runtime_put_sync(ddev->dev);
- clk_disable(ldev->pixel_clk);
if (clk_set_rate(ldev->pixel_clk, rate) < 0) {
DRM_ERROR("Cannot set rate (%dHz) for pixel clk\n", rate);
return false;
}
- clk_enable(ldev->pixel_clk);
adjusted_mode->clock = clk_get_rate(ldev->pixel_clk) / 1000;
+ if (runtime_active) {
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret) {
+ DRM_ERROR("Failed to fixup mode, cannot get sync\n");
+ return false;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("requested clock %dkHz, adjusted clock %dkHz\n",
+ mode->clock, adjusted_mode->clock);
+
return true;
}
static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ struct drm_device *ddev = crtc->dev;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct videomode vm;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
u32 val;
+ int ret;
+
+ if (!pm_runtime_active(ddev->dev)) {
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret) {
+ DRM_ERROR("Failed to set mode, cannot get sync\n");
+ return;
+ }
+ }
drm_display_mode_to_videomode(mode, &vm);
@@ -555,7 +586,7 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
val |= GCR_VSPOL;
- if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
+ if (vm.flags & DISPLAY_FLAGS_DE_LOW)
val |= GCR_DEPOL;
if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
@@ -587,6 +618,7 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ struct drm_device *ddev = crtc->dev;
struct drm_pending_vblank_event *event = crtc->state->event;
DRM_DEBUG_ATOMIC("\n");
@@ -599,12 +631,12 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
if (event) {
crtc->state->event = NULL;
- spin_lock_irq(&crtc->dev->event_lock);
+ spin_lock_irq(&ddev->event_lock);
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, event);
else
drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&crtc->dev->event_lock);
+ spin_unlock_irq(&ddev->event_lock);
}
}
@@ -660,15 +692,19 @@ bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
* Computation for the two first cases are identical so we can
* simplify the code and only test if line > vactive_end
*/
- line = reg_read(ldev->regs, LTDC_CPSR) & CPSR_CYPOS;
- vactive_start = reg_read(ldev->regs, LTDC_BPCR) & BPCR_AVBP;
- vactive_end = reg_read(ldev->regs, LTDC_AWCR) & AWCR_AAH;
- vtotal = reg_read(ldev->regs, LTDC_TWCR) & TWCR_TOTALH;
-
- if (line > vactive_end)
- *vpos = line - vtotal - vactive_start;
- else
- *vpos = line - vactive_start;
+ if (pm_runtime_active(ddev->dev)) {
+ line = reg_read(ldev->regs, LTDC_CPSR) & CPSR_CYPOS;
+ vactive_start = reg_read(ldev->regs, LTDC_BPCR) & BPCR_AVBP;
+ vactive_end = reg_read(ldev->regs, LTDC_AWCR) & AWCR_AAH;
+ vtotal = reg_read(ldev->regs, LTDC_TWCR) & TWCR_TOTALH;
+
+ if (line > vactive_end)
+ *vpos = line - vtotal - vactive_start;
+ else
+ *vpos = line - vactive_start;
+ } else {
+ *vpos = 0;
+ }
*hpos = 0;
@@ -779,7 +815,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
/* Configures the color frame buffer pitch in bytes & line length */
pitch_in_bytes = fb->pitches[0];
- line_length = drm_format_plane_cpp(fb->format->format, 0) *
+ line_length = fb->format->cpp[0] *
(x1 - x0 + 1) + (ldev->caps.bus_width >> 3) - 1;
val = ((pitch_in_bytes << 16) | line_length);
reg_update_bits(ldev->regs, LTDC_L1CFBLR + lofs,
@@ -822,11 +858,11 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
mutex_lock(&ldev->err_lock);
if (ldev->error_status & ISR_FUIF) {
- DRM_DEBUG_DRIVER("Fifo underrun\n");
+ DRM_WARN("ltdc fifo underrun: please verify display mode\n");
ldev->error_status &= ~ISR_FUIF;
}
if (ldev->error_status & ISR_TERRIF) {
- DRM_DEBUG_DRIVER("Transfer error\n");
+ DRM_WARN("ltdc transfer error\n");
ldev->error_status &= ~ISR_TERRIF;
}
mutex_unlock(&ldev->err_lock);
@@ -864,6 +900,16 @@ static void ltdc_plane_atomic_print_state(struct drm_printer *p,
fpsi->counter = 0;
}
+static bool ltdc_plane_format_mod_supported(struct drm_plane *plane,
+ u32 format,
+ u64 modifier)
+{
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ return false;
+}
+
static const struct drm_plane_funcs ltdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -872,6 +918,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_print_state = ltdc_plane_atomic_print_state,
+ .format_mod_supported = ltdc_plane_format_mod_supported,
};
static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
@@ -890,6 +937,7 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
unsigned int i, nb_fmt = 0;
u32 formats[NB_PF * 2];
u32 drm_fmt, drm_fmt_no_alpha;
+ const u64 *modifiers = ltdc_format_modifiers;
int ret;
/* Get supported pixel formats */
@@ -918,7 +966,7 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
ret = drm_universal_plane_init(ddev, plane, possible_crtcs,
&ltdc_plane_funcs, formats, nb_fmt,
- NULL, type, NULL);
+ modifiers, type, NULL);
if (ret < 0)
return NULL;
@@ -1021,10 +1069,13 @@ static int ltdc_get_caps(struct drm_device *ddev)
struct ltdc_device *ldev = ddev->dev_private;
u32 bus_width_log2, lcr, gc2r;
- /* at least 1 layer must be managed */
+ /*
+ * At least one layer must be managed and the number of layers
+ * must not exceed LTDC_MAX_LAYER.
+ */
lcr = reg_read(ldev->regs, LTDC_LCR);
- ldev->caps.nb_layers = max_t(int, lcr, 1);
+ ldev->caps.nb_layers = clamp((int)lcr, 1, LTDC_MAX_LAYER);
/* set data bus width */
gc2r = reg_read(ldev->regs, LTDC_GC2R);
@@ -1125,8 +1176,9 @@ int ltdc_load(struct drm_device *ddev)
ldev->pixel_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(ldev->pixel_clk)) {
- DRM_ERROR("Unable to get lcd clock\n");
- return -ENODEV;
+ if (PTR_ERR(ldev->pixel_clk) != -EPROBE_DEFER)
+ DRM_ERROR("Unable to get lcd clock\n");
+ return PTR_ERR(ldev->pixel_clk);
}
if (clk_prepare_enable(ldev->pixel_clk)) {
@@ -1134,6 +1186,12 @@ int ltdc_load(struct drm_device *ddev)
return -ENODEV;
}
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ usleep_range(10, 20);
+ reset_control_deassert(rstc);
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ldev->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(ldev->regs)) {
@@ -1142,8 +1200,15 @@ int ltdc_load(struct drm_device *ddev)
goto err;
}
+ /* Disable interrupts */
+ reg_clear(ldev->regs, LTDC_IER,
+ IER_LIE | IER_RRIE | IER_FUIE | IER_TERRIE);
+
for (i = 0; i < MAX_IRQ; i++) {
irq = platform_get_irq(pdev, i);
+ if (irq == -EPROBE_DEFER)
+ goto err;
+
if (irq < 0)
continue;
@@ -1156,15 +1221,6 @@ int ltdc_load(struct drm_device *ddev)
}
}
- if (!IS_ERR(rstc)) {
- reset_control_assert(rstc);
- usleep_range(10, 20);
- reset_control_deassert(rstc);
- }
-
- /* Disable interrupts */
- reg_clear(ldev->regs, LTDC_IER,
- IER_LIE | IER_RRIE | IER_FUIE | IER_TERRIE);
ret = ltdc_get_caps(ddev);
if (ret) {
@@ -1173,7 +1229,7 @@ int ltdc_load(struct drm_device *ddev)
goto err;
}
- DRM_INFO("ltdc hw version 0x%08x - ready\n", ldev->caps.hw_version);
+ DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
/* Add endpoints panels or bridges if any */
for (i = 0; i < MAX_ENDPOINTS; i++) {
@@ -1203,6 +1259,8 @@ int ltdc_load(struct drm_device *ddev)
goto err;
}
+ ddev->mode_config.allow_fb_modifiers = true;
+
ret = ltdc_crtc_init(ddev, crtc);
if (ret) {
DRM_ERROR("Failed to init crtc\n");
@@ -1218,8 +1276,11 @@ int ltdc_load(struct drm_device *ddev)
/* Allow usage of vblank without having to call drm_irq_install */
ddev->irq_enabled = 1;
- return 0;
+ clk_disable_unprepare(ldev->pixel_clk);
+
+ pm_runtime_enable(ddev->dev);
+ return 0;
err:
for (i = 0; i < MAX_ENDPOINTS; i++)
drm_panel_bridge_remove(bridge[i]);
@@ -1231,7 +1292,6 @@ err:
void ltdc_unload(struct drm_device *ddev)
{
- struct ltdc_device *ldev = ddev->dev_private;
int i;
DRM_DEBUG_DRIVER("\n");
@@ -1239,7 +1299,7 @@ void ltdc_unload(struct drm_device *ddev)
for (i = 0; i < MAX_ENDPOINTS; i++)
drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
- clk_disable_unprepare(ldev->pixel_clk);
+ pm_runtime_disable(ddev->dev);
}
MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>");
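
/*
 * Sketch of the modifier plumbing added to ltdc above: the plane
 * advertises an INVALID-terminated modifier list through
 * drm_universal_plane_init() and accepts only LINEAR in
 * .format_mod_supported. The demo_* names are illustrative.
 */
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

static const u64 demo_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,	/* terminator */
};

static bool demo_format_mod_supported(struct drm_plane *plane,
				      u32 format, u64 modifier)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
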
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 3ff73998d841..1a1b52e6f73e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -49,22 +49,8 @@ static struct drm_driver sun4i_drv_driver = {
.minor = 0,
/* GEM Operations */
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.dumb_create = drm_sun4i_gem_dumb_create,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
-
- /* PRIME Operations */
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
-
- /* Frame Buffer Operations */
};
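
/*
 * Sketch of the boilerplate reduction above, assuming this tree's
 * DRM_GEM_CMA_VMAP_DRIVER_OPS macro: it expands to the GEM and PRIME
 * callbacks that the driver previously spelled out by hand. demo_* names
 * are illustrative.
 */
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>

static struct drm_driver demo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	DRM_GEM_CMA_VMAP_DRIVER_OPS,	/* GEM object + PRIME boilerplate */
	.name = "demo",
};
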
static int sun4i_drv_bind(struct device *dev)
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index bfa7e2b146df..a1fc8b520985 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -980,6 +980,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
switch (msg->type) {
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
ret = sun6i_dsi_dcs_write_short(dsi, msg);
break;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 607a6ea17ecc..079250c85733 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -26,6 +26,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
{
stats->frames = 0;
@@ -1155,20 +1158,12 @@ static void tegra_dc_destroy(struct drm_crtc *crtc)
static void tegra_crtc_reset(struct drm_crtc *crtc)
{
- struct tegra_dc_state *state;
+ struct tegra_dc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
-
- kfree(crtc->state);
- crtc->state = NULL;
-
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (state) {
- crtc->state = &state->base;
- crtc->state->crtc = crtc;
- }
+ tegra_crtc_atomic_destroy_state(crtc, crtc->state);
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
drm_crtc_vblank_reset(crtc);
}
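
/*
 * Sketch of the reset pattern above: a driver with a subclassed CRTC
 * state frees the old state through its own destroy hook, then hands a
 * freshly zeroed state to __drm_atomic_helper_crtc_reset(), which also
 * copes with a failed allocation. demo_* names are assumptions.
 */
#include <linux/slab.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>

struct demo_crtc_state {
	struct drm_crtc_state base;	/* must embed the core state */
	u32 demo_flags;
};

static void demo_crtc_atomic_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state); /* assumed */

static void demo_crtc_reset(struct drm_crtc *crtc)
{
	struct demo_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (crtc->state)
		demo_crtc_atomic_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, state ? &state->base : NULL);
}
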
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 1dd83a757dba..57cc26e1da01 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -131,18 +131,16 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
{
- unsigned int hsub, vsub, i;
+ const struct drm_format_info *info = drm_get_format_info(drm, cmd);
struct tegra_bo *planes[4];
struct drm_gem_object *gem;
struct drm_framebuffer *fb;
+ unsigned int i;
int err;
- hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format);
-
- for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) {
- unsigned int width = cmd->width / (i ? hsub : 1);
- unsigned int height = cmd->height / (i ? vsub : 1);
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = cmd->width / (i ? info->hsub : 1);
+ unsigned int height = cmd->height / (i ? info->vsub : 1);
unsigned int size, bpp;
gem = drm_gem_object_lookup(file, cmd->handles[i]);
@@ -151,7 +149,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
goto unreference;
}
- bpp = drm_format_plane_cpp(cmd->pixel_format, i);
+ bpp = info->cpp[i];
size = (height - 1) * cmd->pitches[i] +
width * bpp + cmd->offsets[i];
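
/*
 * Sketch of the drm_get_format_info() conversion above: per-format
 * geometry now comes from one lookup instead of three separate helper
 * calls. demo_plane_dims() is an illustrative helper.
 */
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>

static void demo_plane_dims(const struct drm_format_info *info,
			    const struct drm_mode_fb_cmd2 *cmd,
			    unsigned int i, unsigned int *w,
			    unsigned int *h, unsigned int *cpp)
{
	/* chroma planes (i > 0) are subsampled by hsub x vsub */
	*w = cmd->width / (i ? info->hsub : 1);
	*h = cmd->height / (i ? info->vsub : 1);
	*cpp = info->cpp[i];	/* bytes per pixel of plane i */
}
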
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2845fceb2fbd..c7de667d482a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -166,28 +166,35 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
reservation_object_assert_held(bo->resv);
- if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- BUG_ON(!list_empty(&bo->lru));
+ if (!list_empty(&bo->lru))
+ return;
- man = &bdev->man[bo->mem.mem_type];
- list_add_tail(&bo->lru, &man->lru[bo->priority]);
- kref_get(&bo->list_kref);
+ if (mem->placement & TTM_PL_FLAG_NO_EVICT)
+ return;
- if (bo->ttm && !(bo->ttm->page_flags &
- (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap,
- &bdev->glob->swap_lru[bo->priority]);
- kref_get(&bo->list_kref);
- }
+ man = &bdev->man[mem->mem_type];
+ list_add_tail(&bo->lru, &man->lru[bo->priority]);
+ kref_get(&bo->list_kref);
+
+ if (bo->ttm && !(bo->ttm->page_flags &
+ (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
+ list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+ kref_get(&bo->list_kref);
}
}
+
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+{
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
+}
EXPORT_SYMBOL(ttm_bo_add_to_lru);
static void ttm_bo_ref_bug(struct kref *list_kref)
@@ -766,32 +773,72 @@ EXPORT_SYMBOL(ttm_bo_eviction_valuable);
* b. Otherwise, trylock it.
*/
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx, bool *locked)
+ struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
{
bool ret = false;
- *locked = false;
if (bo->resv == ctx->resv) {
reservation_object_assert_held(bo->resv);
if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
|| !list_empty(&bo->ddestroy))
ret = true;
+ *locked = false;
+ if (busy)
+ *busy = false;
} else {
- *locked = reservation_object_trylock(bo->resv);
- ret = *locked;
+ ret = reservation_object_trylock(bo->resv);
+ *locked = ret;
+ if (busy)
+ *busy = !ret;
}
return ret;
}
+/**
+ * ttm_mem_evict_wait_busy - wait for a busy BO to become available
+ *
+ * @busy_bo: BO which couldn't be locked with trylock
+ * @ctx: operation context
+ * @ticket: acquire ticket
+ *
+ * Try to lock a busy buffer object to avoid failing eviction.
+ */
+static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket)
+{
+ int r;
+
+ if (!busy_bo || !ticket)
+ return -EBUSY;
+
+ if (ctx->interruptible)
+ r = reservation_object_lock_interruptible(busy_bo->resv,
+ ticket);
+ else
+ r = reservation_object_lock(busy_bo->resv, ticket);
+
+ /*
+ * TODO: It would be better to keep the BO locked until allocation is at
+ * least tried one more time, but that would mean a much larger rework
+ * of TTM.
+ */
+ if (!r)
+ reservation_object_unlock(busy_bo->resv);
+
+ return r == -EDEADLK ? -EAGAIN : r;
+}
+
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
const struct ttm_place *place,
- struct ttm_operation_ctx *ctx)
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket)
{
+ struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_buffer_object *bo = NULL;
bool locked = false;
unsigned i;
int ret;
@@ -799,8 +846,15 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
+ bool busy;
+
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
+ &busy)) {
+ if (busy && !busy_bo &&
+ bo->resv->lock.ctx != ticket)
+ busy_bo = bo;
continue;
+ }
if (place && !bdev->driver->eviction_valuable(bo,
place)) {
@@ -819,8 +873,13 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
}
if (!bo) {
+ if (busy_bo)
+ ttm_bo_get(busy_bo);
spin_unlock(&glob->lru_lock);
- return -EBUSY;
+ ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
+ if (busy_bo)
+ ttm_bo_put(busy_bo);
+ return ret;
}
kref_get(&bo->list_kref);
@@ -892,13 +951,12 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
* space, or we've evicted everything and there isn't enough space.
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
- uint32_t mem_type,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem,
- struct ttm_operation_ctx *ctx)
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
do {
@@ -907,11 +965,12 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
+ ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
+ bo->resv->lock.ctx);
if (unlikely(ret != 0))
return ret;
} while (1);
- mem->mem_type = mem_type;
+
return ttm_bo_add_move_fence(bo, man, mem);
}
@@ -960,6 +1019,59 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
}
/**
+ * ttm_bo_mem_placement - check if placement is compatible
+ * @bo: BO to find memory for
+ * @place: where to search
+ * @mem: the memory object to fill in
+ * @ctx: operation context
+ *
+ * Check if placement is compatible and fill in mem structure.
+ * Returns -EBUSY if placement won't work or negative error code.
+ * 0 when placement can be used.
+ */
+static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ struct ttm_mem_type_manager *man;
+ uint32_t cur_flags = 0;
+ int ret;
+
+ ret = ttm_mem_type_from_place(place, &mem_type);
+ if (ret)
+ return ret;
+
+ man = &bdev->man[mem_type];
+ if (!man->has_type || !man->use_type)
+ return -EBUSY;
+
+ if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
+ return -EBUSY;
+
+ cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags
+ */
+ ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);
+
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+
+ if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
+ spin_lock(&bo->bdev->glob->lru_lock);
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, mem);
+ spin_unlock(&bo->bdev->glob->lru_lock);
+ }
+
+ return 0;
+}
+
+/**
* Creates space for memory region @mem according to its type.
*
* This function first searches for free space in compatible memory types in
@@ -973,12 +1085,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
- uint32_t mem_type = TTM_PL_SYSTEM;
- uint32_t cur_flags = 0;
bool type_found = false;
- bool type_ok = false;
- bool has_erestartsys = false;
int i, ret;
ret = reservation_object_reserve_shared(bo->resv, 1);
@@ -988,97 +1095,70 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = NULL;
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
+ struct ttm_mem_type_manager *man;
- ret = ttm_mem_type_from_place(place, &mem_type);
- if (ret)
- return ret;
- man = &bdev->man[mem_type];
- if (!man->has_type || !man->use_type)
- continue;
-
- type_ok = ttm_bo_mt_compatible(man, mem_type, place,
- &cur_flags);
-
- if (!type_ok)
+ ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+ if (ret == -EBUSY)
continue;
+ if (ret)
+ goto error;
type_found = true;
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
- cur_flags);
- /*
- * Use the access and other non-mapping-related flag bits from
- * the memory placement flags to the current flags
- */
- ttm_flag_masked(&cur_flags, place->flags,
- ~TTM_PL_MASK_MEMTYPE);
-
- if (mem_type == TTM_PL_SYSTEM)
- break;
+ mem->mm_node = NULL;
+ if (mem->mem_type == TTM_PL_SYSTEM)
+ return 0;
+ man = &bdev->man[mem->mem_type];
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret))
- return ret;
+ goto error;
if (mem->mm_node) {
ret = ttm_bo_add_move_fence(bo, man, mem);
if (unlikely(ret)) {
(*man->func->put_node)(man, mem);
- return ret;
+ goto error;
}
- break;
+ return 0;
}
}
- if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
- mem->mem_type = mem_type;
- mem->placement = cur_flags;
- return 0;
- }
-
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
- ret = ttm_mem_type_from_place(place, &mem_type);
- if (ret)
- return ret;
- man = &bdev->man[mem_type];
- if (!man->has_type || !man->use_type)
- continue;
- if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
+ ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+ if (ret == -EBUSY)
continue;
+ if (ret)
+ goto error;
type_found = true;
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
- cur_flags);
- /*
- * Use the access and other non-mapping-related flag bits from
- * the memory placement flags to the current flags
- */
- ttm_flag_masked(&cur_flags, place->flags,
- ~TTM_PL_MASK_MEMTYPE);
-
- if (mem_type == TTM_PL_SYSTEM) {
- mem->mem_type = mem_type;
- mem->placement = cur_flags;
- mem->mm_node = NULL;
+ mem->mm_node = NULL;
+ if (mem->mem_type == TTM_PL_SYSTEM)
return 0;
- }
- ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
- if (ret == 0 && mem->mm_node) {
- mem->placement = cur_flags;
+ ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
+ if (ret == 0 && mem->mm_node)
return 0;
- }
- if (ret == -ERESTARTSYS)
- has_erestartsys = true;
+
+ if (ret && ret != -EBUSY)
+ goto error;
}
+ ret = -ENOMEM;
if (!type_found) {
pr_err(TTM_PFX "No compatible memory type found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ }
+
+error:
+ if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
+ spin_lock(&bo->bdev->glob->lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&bo->bdev->glob->lru_lock);
}
- return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
+ return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
@@ -1401,7 +1481,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
+ ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
+ NULL);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
@@ -1772,7 +1853,8 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
- if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
+ if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
+ NULL)) {
ret = 0;
break;
}
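
/*
 * Simplified sketch (not the TTM API) of the eviction change above: walk
 * the LRU with trylocks, remember the first contended object, and when
 * nothing evictable is found, block on that object once so the next pass
 * can make progress; -EDEADLK is turned into -EAGAIN for the caller.
 * demo_bo and demo_do_evict() are illustrative assumptions.
 */
#include <linux/list.h>
#include <linux/reservation.h>

struct demo_bo {
	struct list_head lru_node;
	struct reservation_object *resv;
};

static int demo_do_evict(struct demo_bo *bo); /* assumed, unlocks bo */

static int demo_evict_first(struct list_head *lru,
			    struct ww_acquire_ctx *ticket)
{
	struct demo_bo *bo, *busy = NULL;
	int ret;

	list_for_each_entry(bo, lru, lru_node) {
		if (reservation_object_trylock(bo->resv))
			return demo_do_evict(bo);
		if (!busy)
			busy = bo;	/* remember first contended BO */
	}

	if (!busy || !ticket)
		return -EBUSY;

	/* Block once on the busy BO, drop it, and let the caller retry */
	ret = reservation_object_lock_interruptible(busy->resv, ticket);
	if (!ret)
		reservation_object_unlock(busy->resv);

	return ret == -EDEADLK ? -EAGAIN : ret;
}
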
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 0075eb9a0b52..957ec375a4ba 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -69,7 +69,8 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_add_to_lru(bo);
+ if (list_empty(&bo->lru))
+ ttm_bo_add_to_lru(bo);
reservation_object_unlock(bo->resv);
}
spin_unlock(&glob->lru_lock);
@@ -93,7 +94,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups)
+ struct list_head *dups, bool del_lru)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
@@ -172,11 +173,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}
- if (ticket)
- ww_acquire_done(ticket);
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
+ if (del_lru) {
+ spin_lock(&glob->lru_lock);
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ }
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -203,7 +204,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
reservation_object_add_shared_fence(bo->resv, fence);
else
reservation_object_add_excl_fence(bo->resv, fence);
- ttm_bo_add_to_lru(bo);
+ if (list_empty(&bo->lru))
+ ttm_bo_add_to_lru(bo);
+ else
+ ttm_bo_move_to_lru_tail(bo, NULL);
reservation_object_unlock(bo->resv);
}
spin_unlock(&glob->lru_lock);
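For reference, a minimal caller-side sketch of the widened
ttm_eu_reserve_buffers() signature (validate_list and fence are
hypothetical driver-side names, not part of this patch):

	struct ww_acquire_ctx ticket;
	LIST_HEAD(duplicates);
	int ret;

	/* del_lru = false keeps the BOs on their LRU lists while
	 * reserved; ttm_eu_fence_buffer_objects() then bumps them to
	 * the LRU tail rather than re-adding them.
	 */
	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true,
				     &duplicates, false);
	if (ret)
		return ret;

	/* ... queue the GPU work that uses the reserved BOs ... */

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, fence);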
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index a24af2d2f574..78a78938e81f 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -26,6 +26,11 @@ static const struct v3d_reg_def v3d_hub_reg_defs[] = {
REGDEF(V3D_HUB_IDENT3),
REGDEF(V3D_HUB_INT_STS),
REGDEF(V3D_HUB_INT_MSK_STS),
+
+ REGDEF(V3D_MMU_CTL),
+ REGDEF(V3D_MMU_VIO_ADDR),
+ REGDEF(V3D_MMU_VIO_ID),
+ REGDEF(V3D_MMU_DEBUG_INFO),
};
static const struct v3d_reg_def v3d_gca_reg_defs[] = {
@@ -50,12 +55,25 @@ static const struct v3d_reg_def v3d_core_reg_defs[] = {
REGDEF(V3D_PTB_BPCA),
REGDEF(V3D_PTB_BPCS),
- REGDEF(V3D_MMU_CTL),
- REGDEF(V3D_MMU_VIO_ADDR),
-
REGDEF(V3D_GMP_STATUS),
REGDEF(V3D_GMP_CFG),
REGDEF(V3D_GMP_VIO_ADDR),
+
+ REGDEF(V3D_ERR_FDBGO),
+ REGDEF(V3D_ERR_FDBGB),
+ REGDEF(V3D_ERR_FDBGS),
+ REGDEF(V3D_ERR_STAT),
+};
+
+static const struct v3d_reg_def v3d_csd_reg_defs[] = {
+ REGDEF(V3D_CSD_STATUS),
+ REGDEF(V3D_CSD_CURRENT_CFG0),
+ REGDEF(V3D_CSD_CURRENT_CFG1),
+ REGDEF(V3D_CSD_CURRENT_CFG2),
+ REGDEF(V3D_CSD_CURRENT_CFG3),
+ REGDEF(V3D_CSD_CURRENT_CFG4),
+ REGDEF(V3D_CSD_CURRENT_CFG5),
+ REGDEF(V3D_CSD_CURRENT_CFG6),
};
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
@@ -89,6 +107,17 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
V3D_CORE_READ(core,
v3d_core_reg_defs[i].reg));
}
+
+ if (v3d_has_csd(v3d)) {
+ for (i = 0; i < ARRAY_SIZE(v3d_csd_reg_defs); i++) {
+ seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
+ core,
+ v3d_csd_reg_defs[i].name,
+ v3d_csd_reg_defs[i].reg,
+ V3D_CORE_READ(core,
+ v3d_csd_reg_defs[i].reg));
+ }
+ }
}
return 0;
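With a CSD-capable core present, the regs file gains entries of the
shape below (values hypothetical), following the existing
"core %d %s (0x%04x): 0x%08x" format string:

	core 0 V3D_CSD_STATUS (0x0900): 0x00000000
	core 0 V3D_CSD_CURRENT_CFG0 (0x0920): 0x00000000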
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index a06b05f714a5..fea597f4db8a 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -7,9 +7,9 @@
* This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs.
* For V3D 2.x support, see the VC4 driver.
*
- * Currently only single-core rendering using the binner and renderer,
- * along with TFU (texture formatting unit) rendering is supported.
- * V3D 4.x's CSD (compute shader dispatch) is not yet supported.
+ * The V3D GPU includes a tiled renderer (composed of bin and render
+ * pipelines), the TFU (texture formatting unit), and the CSD (compute
+ * shader dispatch).
*/
#include <linux/clk.h>
@@ -120,6 +120,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_TFU:
args->value = 1;
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_CSD:
+ args->value = v3d_has_csd(v3d);
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -179,6 +182,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CSD, v3d_submit_csd_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
static struct drm_driver v3d_drm_driver = {
@@ -235,9 +239,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
struct drm_device *drm;
struct v3d_dev *v3d;
int ret;
+ u32 mmu_debug;
u32 ident1;
- dev->coherent_dma_mask = DMA_BIT_MASK(36);
v3d = kzalloc(sizeof(*v3d), GFP_KERNEL);
if (!v3d)
@@ -254,6 +258,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
goto dev_free;
+ mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
+ dev->coherent_dma_mask =
+ DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
+ v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
+
ident1 = V3D_READ(V3D_HUB_IDENT1);
v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
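A worked example of the probe-time arithmetic (register value
hypothetical): a V3D_MMU_DEBUG_INFO readback of 0x00000aa4 decodes as
PA_WIDTH = 0xa (bits 11:8), VA_WIDTH = 0xa (bits 7:4) and MMU version 4
(bits 3:0), so the device gets coherent_dma_mask = DMA_BIT_MASK(30 + 10)
= DMA_BIT_MASK(40) and v3d->va_width = 40, replacing the old hardcoded
36-bit mask.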
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index e9d4a2fdcf44..9aad9da1eb11 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -16,9 +16,11 @@ enum v3d_queue {
V3D_BIN,
V3D_RENDER,
V3D_TFU,
+ V3D_CSD,
+ V3D_CACHE_CLEAN,
};
-#define V3D_MAX_QUEUES (V3D_TFU + 1)
+#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)
struct v3d_queue_state {
struct drm_gpu_scheduler sched;
@@ -55,6 +57,8 @@ struct v3d_dev {
*/
void *mmu_scratch;
dma_addr_t mmu_scratch_paddr;
+ /* virtual address bits from V3D to the MMU. */
+ int va_width;
/* Number of V3D cores. */
u32 cores;
@@ -67,9 +71,10 @@ struct v3d_dev {
struct work_struct overflow_mem_work;
- struct v3d_exec_info *bin_job;
- struct v3d_exec_info *render_job;
+ struct v3d_bin_job *bin_job;
+ struct v3d_render_job *render_job;
struct v3d_tfu_job *tfu_job;
+ struct v3d_csd_job *csd_job;
struct v3d_queue_state queue[V3D_MAX_QUEUES];
@@ -92,6 +97,12 @@ struct v3d_dev {
*/
struct mutex sched_lock;
+ /* Lock taken during a cache clean and when initiating an L2
+ * flush, to keep L2 flushes from interfering with the
+ * synchronous L2 cleans.
+ */
+ struct mutex cache_clean_lock;
+
struct {
u32 num_allocated;
u32 pages_allocated;
@@ -104,6 +115,12 @@ to_v3d_dev(struct drm_device *dev)
return (struct v3d_dev *)dev->dev_private;
}
+static inline bool
+v3d_has_csd(struct v3d_dev *v3d)
+{
+ return v3d->ver >= 41;
+}
+
/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
struct v3d_dev *v3d;
@@ -117,7 +134,7 @@ struct v3d_bo {
struct drm_mm_node node;
/* List entry for the BO's position in
- * v3d_exec_info->unref_list
+ * v3d_render_job->unref_list
*/
struct list_head unref_head;
};
@@ -157,67 +174,74 @@ to_v3d_fence(struct dma_fence *fence)
struct v3d_job {
struct drm_sched_job base;
- struct v3d_exec_info *exec;
+ struct kref refcount;
- /* An optional fence userspace can pass in for the job to depend on. */
- struct dma_fence *in_fence;
+ struct v3d_dev *v3d;
+
+ /* This is the array of BOs that were looked up at the start
+ * of submission.
+ */
+ struct drm_gem_object **bo;
+ u32 bo_count;
+
+ /* Array of struct dma_fence * to block on before submitting this job.
+ */
+ struct xarray deps;
+ unsigned long last_dep;
/* v3d fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *irq_fence;
+ /* scheduler fence for when the job is considered complete and
+ * the BO reservations can be released.
+ */
+ struct dma_fence *done_fence;
+
+ /* Callback for the freeing of the job on refcount going to 0. */
+ void (*free)(struct kref *ref);
+};
+
+struct v3d_bin_job {
+ struct v3d_job base;
+
/* GPU virtual addresses of the start/end of the CL job. */
u32 start, end;
u32 timedout_ctca, timedout_ctra;
-};
-struct v3d_exec_info {
- struct v3d_dev *v3d;
+ /* Corresponding render job, for attaching our overflow memory. */
+ struct v3d_render_job *render;
- struct v3d_job bin, render;
-
- /* Fence for when the scheduler considers the binner to be
- * done, for render to depend on.
- */
- struct dma_fence *bin_done_fence;
+ /* Submitted tile memory allocation start/size, tile state. */
+ u32 qma, qms, qts;
+};
- /* Fence for when the scheduler considers the render to be
- * done, for when the BOs reservations should be complete.
- */
- struct dma_fence *render_done_fence;
+struct v3d_render_job {
+ struct v3d_job base;
- struct kref refcount;
+ /* GPU virtual addresses of the start/end of the CL job. */
+ u32 start, end;
- /* This is the array of BOs that were looked up at the start of exec. */
- struct v3d_bo **bo;
- u32 bo_count;
+ u32 timedout_ctca, timedout_ctra;
/* List of overflow BOs used in the job that need to be
* released once the job is complete.
*/
struct list_head unref_list;
-
- /* Submitted tile memory allocation start/size, tile state. */
- u32 qma, qms, qts;
};
struct v3d_tfu_job {
- struct drm_sched_job base;
+ struct v3d_job base;
struct drm_v3d_submit_tfu args;
+};
- /* An optional fence userspace can pass in for the job to depend on. */
- struct dma_fence *in_fence;
-
- /* v3d fence to be signaled by IRQ handler when the job is complete. */
- struct dma_fence *irq_fence;
-
- struct v3d_dev *v3d;
+struct v3d_csd_job {
+ struct v3d_job base;
- struct kref refcount;
+ u32 timedout_batches;
- /* This is the array of BOs that were looked up at the start of exec. */
- struct v3d_bo *bo[4];
+ struct drm_v3d_submit_csd args;
};
/**
@@ -281,12 +305,14 @@ int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void v3d_exec_put(struct v3d_exec_info *exec);
-void v3d_tfu_job_put(struct v3d_tfu_job *exec);
+void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
+void v3d_clean_caches(struct v3d_dev *v3d);
/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index b0a2a1ae2eb1..89840ed212c0 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -36,6 +36,8 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
return "v3d-render";
case V3D_TFU:
return "v3d-tfu";
+ case V3D_CSD:
+ return "v3d-csd";
default:
return NULL;
}
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 93ff8fcbe475..27e0f87075d9 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -109,7 +109,9 @@ v3d_reset(struct v3d_dev *v3d)
{
struct drm_device *dev = &v3d->drm;
- DRM_ERROR("Resetting GPU.\n");
+ DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
+ DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
+ V3D_CORE_READ(0, V3D_ERR_STAT));
trace_v3d_reset_begin(dev);
/* XXX: only needed for safe powerdown, not reset. */
@@ -162,10 +164,52 @@ v3d_flush_l2t(struct v3d_dev *v3d, int core)
/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
* need to wait for completion before dispatching the job --
* L2T accesses will be stalled until the flush has completed.
+ * However, we do need to make sure we don't try to trigger a
+ * new flush while the L2_CLEAN queue is trying to
+ * synchronously clean after a job.
*/
+ mutex_lock(&v3d->cache_clean_lock);
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
V3D_L2TCACTL_L2TFLS |
V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
+ mutex_unlock(&v3d->cache_clean_lock);
+}
+
+/* Cleans texture L1 and L2 cachelines (writing back dirty data).
+ *
+ * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
+ * executed, we need to make sure that the clean is done before
+ * signaling job completion. So, we synchronously wait before
+ * returning, and we make sure that L2 invalidates don't happen in the
+ * meantime to confuse our are-we-done checks.
+ */
+void
+v3d_clean_caches(struct v3d_dev *v3d)
+{
+ struct drm_device *dev = &v3d->drm;
+ int core = 0;
+
+ trace_v3d_cache_clean_begin(dev);
+
+ V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
+ if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
+ V3D_L2TCACTL_L2TFLS), 100)) {
+ DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
+ }
+
+ mutex_lock(&v3d->cache_clean_lock);
+ V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
+ V3D_L2TCACTL_L2TFLS |
+ V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));
+
+ if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
+ V3D_L2TCACTL_L2TFLS), 100)) {
+ DRM_ERROR("Timeout waiting for L2T clean\n");
+ }
+
+ mutex_unlock(&v3d->cache_clean_lock);
+
+ trace_v3d_cache_clean_end(dev);
}
/* Invalidates the slice caches. These are read-only caches. */
@@ -193,28 +237,6 @@ v3d_invalidate_caches(struct v3d_dev *v3d)
v3d_invalidate_slices(v3d, 0);
}
-static void
-v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
- struct dma_fence *fence)
-{
- int i;
-
- for (i = 0; i < bo_count; i++) {
- /* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(bos[i]->base.base.resv,
- fence);
- }
-}
-
-static void
-v3d_unlock_bo_reservations(struct v3d_bo **bos,
- int bo_count,
- struct ww_acquire_ctx *acquire_ctx)
-{
- drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count,
- acquire_ctx);
-}
-
/* Takes the reservation lock on all the BOs being referenced, so that
* at queue submit time we can update the reservations.
*
@@ -223,26 +245,21 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos,
* to v3d, so we don't attach dma-buf fences to them.
*/
static int
-v3d_lock_bo_reservations(struct v3d_bo **bos,
- int bo_count,
+v3d_lock_bo_reservations(struct v3d_job *job,
struct ww_acquire_ctx *acquire_ctx)
{
int i, ret;
- ret = drm_gem_lock_reservations((struct drm_gem_object **)bos,
- bo_count, acquire_ctx);
+ ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
if (ret)
return ret;
- /* Reserve space for our shared (read-only) fence references,
- * before we commit the CL to the hardware.
- */
- for (i = 0; i < bo_count; i++) {
- ret = reservation_object_reserve_shared(bos[i]->base.base.resv,
- 1);
+ for (i = 0; i < job->bo_count; i++) {
+ ret = drm_gem_fence_array_add_implicit(&job->deps,
+ job->bo[i], true);
if (ret) {
- v3d_unlock_bo_reservations(bos, bo_count,
- acquire_ctx);
+ drm_gem_unlock_reservations(job->bo, job->bo_count,
+ acquire_ctx);
return ret;
}
}
@@ -251,11 +268,11 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
}
/**
- * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
+ * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
* referenced by the job.
* @dev: DRM device
* @file_priv: DRM file for this fd
- * @exec: V3D job being set up
+ * @job: V3D job being set up
*
* The command validator needs to reference BOs by their index within
* the submitted job's BO list. This does the validation of the job's
@@ -265,18 +282,19 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
* failure, because that will happen at v3d_exec_cleanup() time.
*/
static int
-v3d_cl_lookup_bos(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_v3d_submit_cl *args,
- struct v3d_exec_info *exec)
+v3d_lookup_bos(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct v3d_job *job,
+ u64 bo_handles,
+ u32 bo_count)
{
u32 *handles;
int ret = 0;
int i;
- exec->bo_count = args->bo_handle_count;
+ job->bo_count = bo_count;
- if (!exec->bo_count) {
+ if (!job->bo_count) {
/* See comment on bo_index for why we have to check
* this.
*/
@@ -284,15 +302,15 @@ v3d_cl_lookup_bos(struct drm_device *dev,
return -EINVAL;
}
- exec->bo = kvmalloc_array(exec->bo_count,
- sizeof(struct drm_gem_cma_object *),
- GFP_KERNEL | __GFP_ZERO);
- if (!exec->bo) {
+ job->bo = kvmalloc_array(job->bo_count,
+ sizeof(struct drm_gem_cma_object *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->bo) {
DRM_DEBUG("Failed to allocate validated BO pointers\n");
return -ENOMEM;
}
- handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL);
+ handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
if (!handles) {
ret = -ENOMEM;
DRM_DEBUG("Failed to allocate incoming GEM handles\n");
@@ -300,15 +318,15 @@ v3d_cl_lookup_bos(struct drm_device *dev,
}
if (copy_from_user(handles,
- (void __user *)(uintptr_t)args->bo_handles,
- exec->bo_count * sizeof(u32))) {
+ (void __user *)(uintptr_t)bo_handles,
+ job->bo_count * sizeof(u32))) {
ret = -EFAULT;
DRM_DEBUG("Failed to copy in GEM handles\n");
goto fail;
}
spin_lock(&file_priv->table_lock);
- for (i = 0; i < exec->bo_count; i++) {
+ for (i = 0; i < job->bo_count; i++) {
struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
handles[i]);
if (!bo) {
@@ -319,7 +337,7 @@ v3d_cl_lookup_bos(struct drm_device *dev,
goto fail;
}
drm_gem_object_get(bo);
- exec->bo[i] = to_v3d_bo(bo);
+ job->bo[i] = bo;
}
spin_unlock(&file_priv->table_lock);
@@ -329,67 +347,50 @@ fail:
}
static void
-v3d_exec_cleanup(struct kref *ref)
+v3d_job_free(struct kref *ref)
{
- struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info,
- refcount);
- struct v3d_dev *v3d = exec->v3d;
- unsigned int i;
- struct v3d_bo *bo, *save;
-
- dma_fence_put(exec->bin.in_fence);
- dma_fence_put(exec->render.in_fence);
-
- dma_fence_put(exec->bin.irq_fence);
- dma_fence_put(exec->render.irq_fence);
-
- dma_fence_put(exec->bin_done_fence);
- dma_fence_put(exec->render_done_fence);
+ struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
+ unsigned long index;
+ struct dma_fence *fence;
+ int i;
- for (i = 0; i < exec->bo_count; i++)
- drm_gem_object_put_unlocked(&exec->bo[i]->base.base);
- kvfree(exec->bo);
+ for (i = 0; i < job->bo_count; i++) {
+ if (job->bo[i])
+ drm_gem_object_put_unlocked(job->bo[i]);
+ }
+ kvfree(job->bo);
- list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
- drm_gem_object_put_unlocked(&bo->base.base);
+ xa_for_each(&job->deps, index, fence) {
+ dma_fence_put(fence);
}
+ xa_destroy(&job->deps);
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
+ dma_fence_put(job->irq_fence);
+ dma_fence_put(job->done_fence);
- kfree(exec);
-}
+ pm_runtime_mark_last_busy(job->v3d->dev);
+ pm_runtime_put_autosuspend(job->v3d->dev);
-void v3d_exec_put(struct v3d_exec_info *exec)
-{
- kref_put(&exec->refcount, v3d_exec_cleanup);
+ kfree(job);
}
static void
-v3d_tfu_job_cleanup(struct kref *ref)
+v3d_render_job_free(struct kref *ref)
{
- struct v3d_tfu_job *job = container_of(ref, struct v3d_tfu_job,
- refcount);
- struct v3d_dev *v3d = job->v3d;
- unsigned int i;
-
- dma_fence_put(job->in_fence);
- dma_fence_put(job->irq_fence);
+ struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
+ base.refcount);
+ struct v3d_bo *bo, *save;
- for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
- if (job->bo[i])
- drm_gem_object_put_unlocked(&job->bo[i]->base.base);
+ list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
+ drm_gem_object_put_unlocked(&bo->base.base);
}
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
-
- kfree(job);
+ v3d_job_free(ref);
}
-void v3d_tfu_job_put(struct v3d_tfu_job *job)
+void v3d_job_put(struct v3d_job *job)
{
- kref_put(&job->refcount, v3d_tfu_job_cleanup);
+ kref_put(&job->refcount, job->free);
}
int
@@ -425,6 +426,87 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
return ret;
}
+static int
+v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
+ struct v3d_job *job, void (*free)(struct kref *ref),
+ u32 in_sync)
+{
+ struct dma_fence *in_fence = NULL;
+ int ret;
+
+ job->v3d = v3d;
+ job->free = free;
+
+ ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0)
+ return ret;
+
+ xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
+
+ ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
+ if (ret == -EINVAL)
+ goto fail;
+
+ ret = drm_gem_fence_array_add(&job->deps, in_fence);
+ if (ret)
+ goto fail;
+
+ kref_init(&job->refcount);
+
+ return 0;
+fail:
+ xa_destroy(&job->deps);
+ pm_runtime_put_autosuspend(v3d->dev);
+ return ret;
+}
+
+static int
+v3d_push_job(struct v3d_file_priv *v3d_priv,
+ struct v3d_job *job, enum v3d_queue queue)
+{
+ int ret;
+
+ ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
+ v3d_priv);
+ if (ret)
+ return ret;
+
+ job->done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ /* put by scheduler job completion */
+ kref_get(&job->refcount);
+
+ drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);
+
+ return 0;
+}
+
+static void
+v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
+ struct v3d_job *job,
+ struct ww_acquire_ctx *acquire_ctx,
+ u32 out_sync,
+ struct dma_fence *done_fence)
+{
+ struct drm_syncobj *sync_out;
+ int i;
+
+ for (i = 0; i < job->bo_count; i++) {
+ /* XXX: Use shared fences for read-only objects. */
+ reservation_object_add_excl_fence(job->bo[i]->resv,
+ job->done_fence);
+ }
+
+ drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+
+ /* Update the return sync object for the job */
+ sync_out = drm_syncobj_find(file_priv, out_sync);
+ if (sync_out) {
+ drm_syncobj_replace_fence(sync_out, done_fence);
+ drm_syncobj_put(sync_out);
+ }
+}
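Taken together, these helpers give every queue type the same submission
skeleton; a condensed sketch with error handling elided ("queue" and the
args fields stand in for the per-ioctl specifics):

	ret = v3d_job_init(v3d, file_priv, &job->base, v3d_job_free,
			   args->in_sync);	/* kref, deps xarray, PM ref */
	ret = v3d_lookup_bos(dev, file_priv, &job->base,
			     args->bo_handles, args->bo_handle_count);
	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);

	mutex_lock(&v3d->sched_lock);
	ret = v3d_push_job(v3d_priv, &job->base, queue);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv, &job->base,
						 &acquire_ctx, args->out_sync,
						 job->base.done_fence);
	v3d_job_put(&job->base);	/* drop the ioctl's reference */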
+
/**
* v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
* @dev: DRM device
@@ -444,9 +526,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_cl *args = data;
- struct v3d_exec_info *exec;
+ struct v3d_bin_job *bin = NULL;
+ struct v3d_render_job *render;
struct ww_acquire_ctx acquire_ctx;
- struct drm_syncobj *sync_out;
int ret = 0;
trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
@@ -456,100 +538,87 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
- if (!exec)
+ render = kcalloc(1, sizeof(*render), GFP_KERNEL);
+ if (!render)
return -ENOMEM;
- ret = pm_runtime_get_sync(v3d->dev);
- if (ret < 0) {
- kfree(exec);
+ render->start = args->rcl_start;
+ render->end = args->rcl_end;
+ INIT_LIST_HEAD(&render->unref_list);
+
+ ret = v3d_job_init(v3d, file_priv, &render->base,
+ v3d_render_job_free, args->in_sync_rcl);
+ if (ret) {
+ kfree(render);
return ret;
}
- kref_init(&exec->refcount);
+ if (args->bcl_start != args->bcl_end) {
+ bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
+ if (!bin) {
+ v3d_job_put(&render->base);
+ return -ENOMEM;
+ }
- ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
- 0, 0, &exec->bin.in_fence);
- if (ret == -EINVAL)
- goto fail;
+ ret = v3d_job_init(v3d, file_priv, &bin->base,
+ v3d_job_free, args->in_sync_bcl);
+ if (ret) {
+ v3d_job_put(&render->base);
+ kfree(bin);
+ return ret;
+ }
- ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
- 0, 0, &exec->render.in_fence);
- if (ret == -EINVAL)
- goto fail;
+ bin->start = args->bcl_start;
+ bin->end = args->bcl_end;
+ bin->qma = args->qma;
+ bin->qms = args->qms;
+ bin->qts = args->qts;
+ bin->render = render;
+ }
- exec->qma = args->qma;
- exec->qms = args->qms;
- exec->qts = args->qts;
- exec->bin.exec = exec;
- exec->bin.start = args->bcl_start;
- exec->bin.end = args->bcl_end;
- exec->render.exec = exec;
- exec->render.start = args->rcl_start;
- exec->render.end = args->rcl_end;
- exec->v3d = v3d;
- INIT_LIST_HEAD(&exec->unref_list);
-
- ret = v3d_cl_lookup_bos(dev, file_priv, args, exec);
+ ret = v3d_lookup_bos(dev, file_priv, &render->base,
+ args->bo_handles, args->bo_handle_count);
if (ret)
goto fail;
- ret = v3d_lock_bo_reservations(exec->bo, exec->bo_count,
- &acquire_ctx);
+ ret = v3d_lock_bo_reservations(&render->base, &acquire_ctx);
if (ret)
goto fail;
mutex_lock(&v3d->sched_lock);
- if (exec->bin.start != exec->bin.end) {
- ret = drm_sched_job_init(&exec->bin.base,
- &v3d_priv->sched_entity[V3D_BIN],
- v3d_priv);
+ if (bin) {
+ ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
if (ret)
goto fail_unreserve;
- exec->bin_done_fence =
- dma_fence_get(&exec->bin.base.s_fence->finished);
-
- kref_get(&exec->refcount); /* put by scheduler job completion */
- drm_sched_entity_push_job(&exec->bin.base,
- &v3d_priv->sched_entity[V3D_BIN]);
+ ret = drm_gem_fence_array_add(&render->base.deps,
+ dma_fence_get(bin->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
}
- ret = drm_sched_job_init(&exec->render.base,
- &v3d_priv->sched_entity[V3D_RENDER],
- v3d_priv);
+ ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
if (ret)
goto fail_unreserve;
-
- exec->render_done_fence =
- dma_fence_get(&exec->render.base.s_fence->finished);
-
- kref_get(&exec->refcount); /* put by scheduler job completion */
- drm_sched_entity_push_job(&exec->render.base,
- &v3d_priv->sched_entity[V3D_RENDER]);
mutex_unlock(&v3d->sched_lock);
- v3d_attach_object_fences(exec->bo, exec->bo_count,
- exec->render_done_fence);
-
- v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
-
- /* Update the return sync object for the */
- sync_out = drm_syncobj_find(file_priv, args->out_sync);
- if (sync_out) {
- drm_syncobj_replace_fence(sync_out, exec->render_done_fence);
- drm_syncobj_put(sync_out);
- }
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ &render->base,
+ &acquire_ctx,
+ args->out_sync,
+ render->base.done_fence);
- v3d_exec_put(exec);
+ if (bin)
+ v3d_job_put(&bin->base);
+ v3d_job_put(&render->base);
return 0;
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
- v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
+ drm_gem_unlock_reservations(render->base.bo,
+ render->base.bo_count, &acquire_ctx);
fail:
- v3d_exec_put(exec);
+ if (bin)
+ v3d_job_put(&bin->base);
+ v3d_job_put(&render->base);
return ret;
}
@@ -572,10 +641,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_v3d_submit_tfu *args = data;
struct v3d_tfu_job *job;
struct ww_acquire_ctx acquire_ctx;
- struct drm_syncobj *sync_out;
- struct dma_fence *sched_done_fence;
int ret = 0;
- int bo_count;
trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
@@ -583,81 +649,172 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
if (!job)
return -ENOMEM;
- ret = pm_runtime_get_sync(v3d->dev);
- if (ret < 0) {
+ ret = v3d_job_init(v3d, file_priv, &job->base,
+ v3d_job_free, args->in_sync);
+ if (ret) {
kfree(job);
return ret;
}
- kref_init(&job->refcount);
-
- ret = drm_syncobj_find_fence(file_priv, args->in_sync,
- 0, 0, &job->in_fence);
- if (ret == -EINVAL)
- goto fail;
+ job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
+ sizeof(*job->base.bo), GFP_KERNEL);
+ if (!job->base.bo) {
+ v3d_job_put(&job->base);
+ return -ENOMEM;
+ }
job->args = *args;
- job->v3d = v3d;
spin_lock(&file_priv->table_lock);
- for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) {
+ for (job->base.bo_count = 0;
+ job->base.bo_count < ARRAY_SIZE(args->bo_handles);
+ job->base.bo_count++) {
struct drm_gem_object *bo;
- if (!args->bo_handles[bo_count])
+ if (!args->bo_handles[job->base.bo_count])
break;
bo = idr_find(&file_priv->object_idr,
- args->bo_handles[bo_count]);
+ args->bo_handles[job->base.bo_count]);
if (!bo) {
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
- bo_count, args->bo_handles[bo_count]);
+ job->base.bo_count,
+ args->bo_handles[job->base.bo_count]);
ret = -ENOENT;
spin_unlock(&file_priv->table_lock);
goto fail;
}
drm_gem_object_get(bo);
- job->bo[bo_count] = to_v3d_bo(bo);
+ job->base.bo[job->base.bo_count] = bo;
}
spin_unlock(&file_priv->table_lock);
- ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+ ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
if (ret)
goto fail;
mutex_lock(&v3d->sched_lock);
- ret = drm_sched_job_init(&job->base,
- &v3d_priv->sched_entity[V3D_TFU],
- v3d_priv);
+ ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
if (ret)
goto fail_unreserve;
+ mutex_unlock(&v3d->sched_lock);
- sched_done_fence = dma_fence_get(&job->base.s_fence->finished);
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ &job->base, &acquire_ctx,
+ args->out_sync,
+ job->base.done_fence);
- kref_get(&job->refcount); /* put by scheduler job completion */
- drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[V3D_TFU]);
+ v3d_job_put(&job->base);
+
+ return 0;
+
+fail_unreserve:
mutex_unlock(&v3d->sched_lock);
+ drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
+ &acquire_ctx);
+fail:
+ v3d_job_put(&job->base);
- v3d_attach_object_fences(job->bo, bo_count, sched_done_fence);
+ return ret;
+}
- v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+/**
+ * v3d_submit_csd_ioctl() - Submits a CSD (compute shader dispatch) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the CSD, which we don't
+ * need to validate since the CSD is behind the MMU.
+ */
+int
+v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_submit_csd *args = data;
+ struct v3d_csd_job *job;
+ struct v3d_job *clean_job;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret;
- /* Update the return sync object */
- sync_out = drm_syncobj_find(file_priv, args->out_sync);
- if (sync_out) {
- drm_syncobj_replace_fence(sync_out, sched_done_fence);
- drm_syncobj_put(sync_out);
+ trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
+
+ if (!v3d_has_csd(v3d)) {
+ DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
+ return -EINVAL;
+ }
+
+ job = kcalloc(1, sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ ret = v3d_job_init(v3d, file_priv, &job->base,
+ v3d_job_free, args->in_sync);
+ if (ret) {
+ kfree(job);
+ return ret;
+ }
+
+ clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
+ if (!clean_job) {
+ v3d_job_put(&job->base);
+ return -ENOMEM;
}
- dma_fence_put(sched_done_fence);
- v3d_tfu_job_put(job);
+ ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
+ if (ret) {
+ v3d_job_put(&job->base);
+ kfree(clean_job);
+ return ret;
+ }
+
+ job->args = *args;
+
+ ret = v3d_lookup_bos(dev, file_priv, clean_job,
+ args->bo_handles, args->bo_handle_count);
+ if (ret)
+ goto fail;
+
+ ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&v3d->sched_lock);
+ ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
+ if (ret)
+ goto fail_unreserve;
+
+ ret = drm_gem_fence_array_add(&clean_job->deps,
+ dma_fence_get(job->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+
+ ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
+ if (ret)
+ goto fail_unreserve;
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ clean_job,
+ &acquire_ctx,
+ args->out_sync,
+ clean_job->done_fence);
+
+ v3d_job_put(&job->base);
+ v3d_job_put(clean_job);
return 0;
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
- v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+ drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+ &acquire_ctx);
fail:
- v3d_tfu_job_put(job);
+ v3d_job_put(&job->base);
+ v3d_job_put(clean_job);
return ret;
}
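From userspace, a dispatch then amounts to filling the seven CFG words
and issuing the new ioctl; a hedged sketch, assuming the matching
struct drm_v3d_submit_csd and DRM_IOCTL_V3D_SUBMIT_CSD definitions from
the uapi side of this series (local variable names hypothetical):

	struct drm_v3d_submit_csd submit = {
		/* cfg[0..2]: workgroup counts/offsets, cfg[3]: dispatch
		 * parameters, cfg[4]: batches - 1, cfg[5]: shader
		 * address, cfg[6]: uniforms address.
		 */
		.cfg = { cfg0, cfg1, cfg2, cfg3, cfg4, cfg5, cfg6 },
		.bo_handles = (uintptr_t)bo_handles,
		.bo_handle_count = bo_handle_count,
		.in_sync = in_syncobj,
		.out_sync = out_syncobj,
	};

	ret = drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CSD, &submit);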
@@ -677,6 +834,7 @@ v3d_gem_init(struct drm_device *dev)
mutex_init(&v3d->bo_lock);
mutex_init(&v3d->reset_lock);
mutex_init(&v3d->sched_lock);
+ mutex_init(&v3d->cache_clean_lock);
/* Note: We don't allocate address 0. Various bits of HW
* treat 0 as special, such as the occlusion query counters
@@ -715,7 +873,7 @@ v3d_gem_destroy(struct drm_device *dev)
v3d_sched_fini(v3d);
- /* Waiting for exec to finish would need to be done before
+ /* Waiting for jobs to finish would need to be done before
* unregistering V3D.
*/
WARN_ON(v3d->bin_job);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index aa0a180ae700..268d8a889ac5 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -4,9 +4,9 @@
/**
* DOC: Interrupt management for the V3D engine
*
- * When we take a bin, render, or TFU done interrupt, we need to
- * signal the fence for that job so that the scheduler can queue up
- * the next one and unblock any waiters.
+ * When we take a bin, render, TFU done, or CSD done interrupt, we
+ * need to signal the fence for that job so that the scheduler can
+ * queue up the next one and unblock any waiters.
*
* When we take the binner out of memory interrupt, we need to
* allocate some new memory and pass it to the binner so that the
@@ -20,6 +20,7 @@
#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \
V3D_INT_FLDONE | \
V3D_INT_FRDONE | \
+ V3D_INT_CSDDONE | \
V3D_INT_GMPV))
#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \
@@ -62,7 +63,7 @@ v3d_overflow_mem_work(struct work_struct *work)
}
drm_gem_object_get(obj);
- list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
+ list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
@@ -96,7 +97,7 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FLDONE) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->bin_job->bin.irq_fence);
+ to_v3d_fence(v3d->bin_job->base.irq_fence);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -105,13 +106,22 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FRDONE) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->render_job->render.irq_fence);
+ to_v3d_fence(v3d->render_job->base.irq_fence);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
+ if (intsts & V3D_INT_CSDDONE) {
+ struct v3d_fence *fence =
+ to_v3d_fence(v3d->csd_job->base.irq_fence);
+
+ trace_v3d_csd_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
+ status = IRQ_HANDLED;
+ }
+
/* We shouldn't be triggering these if we have GMP in
* always-allowed mode.
*/
@@ -141,7 +151,7 @@ v3d_hub_irq(int irq, void *arg)
if (intsts & V3D_HUB_INT_TFUC) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->tfu_job->irq_fence);
+ to_v3d_fence(v3d->tfu_job->base.irq_fence);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -152,10 +162,33 @@ v3d_hub_irq(int irq, void *arg)
V3D_HUB_INT_MMU_PTI |
V3D_HUB_INT_MMU_CAP)) {
u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
- u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;
-
- dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
- axi_id, (long long)vio_addr,
+ u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
+ (v3d->va_width - 32));
+ static const char *const v3d41_axi_ids[] = {
+ "L2T",
+ "PTB",
+ "PSE",
+ "TLB",
+ "CLE",
+ "TFU",
+ "MMU",
+ "GMP",
+ };
+ const char *client = "?";
+
+ V3D_WRITE(V3D_MMU_CTL,
+ V3D_READ(V3D_MMU_CTL) & (V3D_MMU_CTL_CAP_EXCEEDED |
+ V3D_MMU_CTL_PT_INVALID |
+ V3D_MMU_CTL_WRITE_VIOLATION));
+
+ if (v3d->ver >= 41) {
+ axi_id = axi_id >> 5;
+ if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
+ client = v3d41_axi_ids[axi_id];
+ }
+
+ dev_err(v3d->dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+ client, axi_id, (long long)vio_addr,
((intsts & V3D_HUB_INT_MMU_WRV) ?
", write violation" : ""),
((intsts & V3D_HUB_INT_MMU_PTI) ?
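A worked decode (values hypothetical): on a 4.1 part with va_width = 40,
a V3D_MMU_VIO_ID readback of 0x40 gives axi_id = 0x40 >> 5 = 2, i.e.
client "PSE", and a V3D_MMU_VIO_ADDR readback of 0x00100000 is reported
as vio_addr = 0x00100000 << (40 - 32) = 0x10000000.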
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index 7a21f1787ab1..395e81d97163 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -69,10 +69,13 @@ int v3d_mmu_set_page_table(struct v3d_dev *v3d)
V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
V3D_WRITE(V3D_MMU_CTL,
V3D_MMU_CTL_ENABLE |
- V3D_MMU_CTL_PT_INVALID |
+ V3D_MMU_CTL_PT_INVALID_ENABLE |
V3D_MMU_CTL_PT_INVALID_ABORT |
+ V3D_MMU_CTL_PT_INVALID_INT |
V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
- V3D_MMU_CTL_CAP_EXCEEDED_ABORT);
+ V3D_MMU_CTL_WRITE_VIOLATION_INT |
+ V3D_MMU_CTL_CAP_EXCEEDED_ABORT |
+ V3D_MMU_CTL_CAP_EXCEEDED_INT);
V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
(v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
V3D_MMU_ILLEGAL_ADDR_ENABLE);
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 8e88af237610..9bcb57781d31 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -152,7 +152,8 @@
# define V3D_MMU_CTL_PT_INVALID_ABORT BIT(19)
# define V3D_MMU_CTL_PT_INVALID_INT BIT(18)
# define V3D_MMU_CTL_PT_INVALID_EXCEPTION BIT(17)
-# define V3D_MMU_CTL_WRITE_VIOLATION BIT(16)
+# define V3D_MMU_CTL_PT_INVALID_ENABLE BIT(16)
+# define V3D_MMU_CTL_WRITE_VIOLATION BIT(12)
# define V3D_MMU_CTL_WRITE_VIOLATION_ABORT BIT(11)
# define V3D_MMU_CTL_WRITE_VIOLATION_INT BIT(10)
# define V3D_MMU_CTL_WRITE_VIOLATION_EXCEPTION BIT(9)
@@ -191,6 +192,14 @@
/* Address that faulted */
#define V3D_MMU_VIO_ADDR 0x01234
+#define V3D_MMU_DEBUG_INFO 0x01238
+# define V3D_MMU_PA_WIDTH_MASK V3D_MASK(11, 8)
+# define V3D_MMU_PA_WIDTH_SHIFT 8
+# define V3D_MMU_VA_WIDTH_MASK V3D_MASK(7, 4)
+# define V3D_MMU_VA_WIDTH_SHIFT 4
+# define V3D_MMU_VERSION_MASK V3D_MASK(3, 0)
+# define V3D_MMU_VERSION_SHIFT 0
+
/* Per-V3D-core registers */
#define V3D_CTL_IDENT0 0x00000
@@ -238,8 +247,11 @@
#define V3D_CTL_L2TCACTL 0x00030
# define V3D_L2TCACTL_TMUWCF BIT(8)
# define V3D_L2TCACTL_L2T_NO_WM BIT(4)
+/* Invalidates cache lines. */
# define V3D_L2TCACTL_FLM_FLUSH 0
+/* Removes cachelines without writing dirty lines back. */
# define V3D_L2TCACTL_FLM_CLEAR 1
+/* Writes out dirty cachelines and marks them clean, but doesn't invalidate. */
# define V3D_L2TCACTL_FLM_CLEAN 2
# define V3D_L2TCACTL_FLM_MASK V3D_MASK(2, 1)
# define V3D_L2TCACTL_FLM_SHIFT 1
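Worked out against these fields (and assuming the pre-existing
V3D_L2TCACTL_L2TFLS = BIT(0), which this hunk doesn't touch), the two
L2TCACTL writes in v3d_gem.c encode as:

	flush: L2TFLS | (FLM_FLUSH << FLM_SHIFT) = 0x1 | (0 << 1) = 0x1
	clean: L2TFLS | (FLM_CLEAN << FLM_SHIFT) = 0x1 | (2 << 1) = 0x5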
@@ -255,6 +267,8 @@
#define V3D_CTL_INT_MSK_CLR 0x00064
# define V3D_INT_QPU_MASK V3D_MASK(27, 16)
# define V3D_INT_QPU_SHIFT 16
+# define V3D_INT_CSDDONE BIT(7)
+# define V3D_INT_PCTR BIT(6)
# define V3D_INT_GMPV BIT(5)
# define V3D_INT_TRFB BIT(4)
# define V3D_INT_SPILLUSE BIT(3)
@@ -374,4 +388,110 @@
#define V3D_GMP_PRESERVE_LOAD 0x00818
#define V3D_GMP_VALID_LINES 0x00820
+#define V3D_CSD_STATUS 0x00900
+# define V3D_CSD_STATUS_NUM_COMPLETED_MASK V3D_MASK(11, 4)
+# define V3D_CSD_STATUS_NUM_COMPLETED_SHIFT 4
+# define V3D_CSD_STATUS_NUM_ACTIVE_MASK V3D_MASK(3, 2)
+# define V3D_CSD_STATUS_NUM_ACTIVE_SHIFT 2
+# define V3D_CSD_STATUS_HAVE_CURRENT_DISPATCH BIT(1)
+# define V3D_CSD_STATUS_HAVE_QUEUED_DISPATCH BIT(0)
+
+#define V3D_CSD_QUEUED_CFG0 0x00904
+# define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_MASK V3D_MASK(31, 16)
+# define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_SHIFT 16
+# define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_MASK V3D_MASK(15, 0)
+# define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_SHIFT 0
+
+#define V3D_CSD_QUEUED_CFG1 0x00908
+# define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_MASK V3D_MASK(31, 16)
+# define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_SHIFT 16
+# define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_MASK V3D_MASK(15, 0)
+# define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_SHIFT 0
+
+#define V3D_CSD_QUEUED_CFG2 0x0090c
+# define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_MASK V3D_MASK(31, 16)
+# define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_SHIFT 16
+# define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_MASK V3D_MASK(15, 0)
+# define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_SHIFT 0
+
+#define V3D_CSD_QUEUED_CFG3 0x00910
+# define V3D_CSD_QUEUED_CFG3_OVERLAP_WITH_PREV BIT(26)
+# define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_MASK V3D_MASK(25, 20)
+# define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_SHIFT 20
+# define V3D_CSD_QUEUED_CFG3_BATCHES_PER_SG_M1_MASK V3D_MASK(19, 12)
+# define V3D_CSD_QUEUED_CFG3_BATCHES_PER_SG_M1_SHIFT 12
+# define V3D_CSD_QUEUED_CFG3_WGS_PER_SG_MASK V3D_MASK(11, 8)
+# define V3D_CSD_QUEUED_CFG3_WGS_PER_SG_SHIFT 8
+# define V3D_CSD_QUEUED_CFG3_WG_SIZE_MASK V3D_MASK(7, 0)
+# define V3D_CSD_QUEUED_CFG3_WG_SIZE_SHIFT 0
+
+/* Number of batches, minus 1 */
+#define V3D_CSD_QUEUED_CFG4 0x00914
+
+/* Shader address, pnan, singleseg, threading, like a shader record. */
+#define V3D_CSD_QUEUED_CFG5 0x00918
+
+/* Uniforms address (4 byte aligned) */
+#define V3D_CSD_QUEUED_CFG6 0x0091c
+
+#define V3D_CSD_CURRENT_CFG0 0x00920
+#define V3D_CSD_CURRENT_CFG1 0x00924
+#define V3D_CSD_CURRENT_CFG2 0x00928
+#define V3D_CSD_CURRENT_CFG3 0x0092c
+#define V3D_CSD_CURRENT_CFG4 0x00930
+#define V3D_CSD_CURRENT_CFG5 0x00934
+#define V3D_CSD_CURRENT_CFG6 0x00938
+
+#define V3D_CSD_CURRENT_ID0 0x0093c
+# define V3D_CSD_CURRENT_ID0_WG_X_MASK V3D_MASK(31, 16)
+# define V3D_CSD_CURRENT_ID0_WG_X_SHIFT 16
+# define V3D_CSD_CURRENT_ID0_WG_IN_SG_MASK V3D_MASK(11, 8)
+# define V3D_CSD_CURRENT_ID0_WG_IN_SG_SHIFT 8
+# define V3D_CSD_CURRENT_ID0_L_IDX_MASK V3D_MASK(7, 0)
+# define V3D_CSD_CURRENT_ID0_L_IDX_SHIFT 0
+
+#define V3D_CSD_CURRENT_ID1 0x00940
+# define V3D_CSD_CURRENT_ID1_WG_Z_MASK V3D_MASK(31, 16)
+# define V3D_CSD_CURRENT_ID1_WG_Z_SHIFT 16
+# define V3D_CSD_CURRENT_ID1_WG_Y_MASK V3D_MASK(15, 0)
+# define V3D_CSD_CURRENT_ID1_WG_Y_SHIFT 0
+
+#define V3D_ERR_FDBGO 0x00f04
+#define V3D_ERR_FDBGB 0x00f08
+#define V3D_ERR_FDBGR 0x00f0c
+
+#define V3D_ERR_FDBGS 0x00f10
+# define V3D_ERR_FDBGS_INTERPZ_IP_STALL BIT(17)
+# define V3D_ERR_FDBGS_DEPTHO_FIFO_IP_STALL BIT(16)
+# define V3D_ERR_FDBGS_XYNRM_IP_STALL BIT(14)
+# define V3D_ERR_FDBGS_EZREQ_FIFO_OP_VALID BIT(13)
+# define V3D_ERR_FDBGS_QXYF_FIFO_OP_VALID BIT(12)
+# define V3D_ERR_FDBGS_QXYF_FIFO_OP_LAST BIT(11)
+# define V3D_ERR_FDBGS_EZTEST_ANYQVALID BIT(7)
+# define V3D_ERR_FDBGS_EZTEST_PASS BIT(6)
+# define V3D_ERR_FDBGS_EZTEST_QREADY BIT(5)
+# define V3D_ERR_FDBGS_EZTEST_VLF_OKNOVALID BIT(4)
+# define V3D_ERR_FDBGS_EZTEST_QSTALL BIT(3)
+# define V3D_ERR_FDBGS_EZTEST_IP_VLFSTALL BIT(2)
+# define V3D_ERR_FDBGS_EZTEST_IP_PRSTALL BIT(1)
+# define V3D_ERR_FDBGS_EZTEST_IP_QSTALL BIT(0)
+
+#define V3D_ERR_STAT 0x00f20
+# define V3D_ERR_L2CARE BIT(15)
+# define V3D_ERR_VCMBE BIT(14)
+# define V3D_ERR_VCMRE BIT(13)
+# define V3D_ERR_VCDI BIT(12)
+# define V3D_ERR_VCDE BIT(11)
+# define V3D_ERR_VDWE BIT(10)
+# define V3D_ERR_VPMEAS BIT(9)
+# define V3D_ERR_VPMEFNA BIT(8)
+# define V3D_ERR_VPMEWNA BIT(7)
+# define V3D_ERR_VPMERNA BIT(6)
+# define V3D_ERR_VPMERR BIT(5)
+# define V3D_ERR_VPMEWR BIT(4)
+# define V3D_ERR_VPAERRGL BIT(3)
+# define V3D_ERR_VPAEBRGL BIT(2)
+# define V3D_ERR_VPAERGS BIT(1)
+# define V3D_ERR_VPAEABB BIT(0)
+
#endif /* V3D_REGS_H */
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index e740f3b99aa5..8c2df6d95283 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -30,158 +30,152 @@ to_v3d_job(struct drm_sched_job *sched_job)
return container_of(sched_job, struct v3d_job, base);
}
-static struct v3d_tfu_job *
-to_tfu_job(struct drm_sched_job *sched_job)
+static struct v3d_bin_job *
+to_bin_job(struct drm_sched_job *sched_job)
{
- return container_of(sched_job, struct v3d_tfu_job, base);
+ return container_of(sched_job, struct v3d_bin_job, base.base);
}
-static void
-v3d_job_free(struct drm_sched_job *sched_job)
+static struct v3d_render_job *
+to_render_job(struct drm_sched_job *sched_job)
{
- struct v3d_job *job = to_v3d_job(sched_job);
+ return container_of(sched_job, struct v3d_render_job, base.base);
+}
- drm_sched_job_cleanup(sched_job);
+static struct v3d_tfu_job *
+to_tfu_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct v3d_tfu_job, base.base);
+}
- v3d_exec_put(job->exec);
+static struct v3d_csd_job *
+to_csd_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct v3d_csd_job, base.base);
}
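The double "base.base" in these helpers works because container_of()
subtracts the offset of the named member path: sched_job points at the
drm_sched_job embedded in the v3d_job, which is itself embedded in the
per-queue struct. A minimal illustration (layout per the v3d_drv.h
changes above):

	struct v3d_csd_job *job =
		container_of(sched_job, struct v3d_csd_job, base.base);

	/* sched_job == &job->base.base, so the walk-back is exact. */
	WARN_ON(&job->base.base != sched_job);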
static void
-v3d_tfu_job_free(struct drm_sched_job *sched_job)
+v3d_job_free(struct drm_sched_job *sched_job)
{
- struct v3d_tfu_job *job = to_tfu_job(sched_job);
+ struct v3d_job *job = to_v3d_job(sched_job);
drm_sched_job_cleanup(sched_job);
-
- v3d_tfu_job_put(job);
+ v3d_job_put(job);
}
/**
- * Returns the fences that the bin or render job depends on, one by one.
- * v3d_job_run() won't be called until all of them have been signaled.
+ * Returns the fences that the job depends on, one by one.
+ *
+ * If placed in the scheduler's .dependency method, the corresponding
+ * .run_job won't be called until all of them have been signaled.
*/
static struct dma_fence *
v3d_job_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity)
{
struct v3d_job *job = to_v3d_job(sched_job);
- struct v3d_exec_info *exec = job->exec;
- enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
- struct dma_fence *fence;
-
- fence = job->in_fence;
- if (fence) {
- job->in_fence = NULL;
- return fence;
- }
-
- if (q == V3D_RENDER) {
- /* If we had a bin job, the render job definitely depends on
- * it. We first have to wait for bin to be scheduled, so that
- * its done_fence is created.
- */
- fence = exec->bin_done_fence;
- if (fence) {
- exec->bin_done_fence = NULL;
- return fence;
- }
- }
/* XXX: Wait on a fence for switching the GMP if necessary,
* and then do so.
*/
- return fence;
-}
-
-/**
- * Returns the fences that the TFU job depends on, one by one.
- * v3d_tfu_job_run() won't be called until all of them have been
- * signaled.
- */
-static struct dma_fence *
-v3d_tfu_job_dependency(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity)
-{
- struct v3d_tfu_job *job = to_tfu_job(sched_job);
- struct dma_fence *fence;
-
- fence = job->in_fence;
- if (fence) {
- job->in_fence = NULL;
- return fence;
- }
+ if (!xa_empty(&job->deps))
+ return xa_erase(&job->deps, job->last_dep++);
return NULL;
}
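The deps xarray consumed here is filled at submit time; a minimal sketch
of the producer side, as done by v3d_job_init() and the bin-to-render
chaining above (drm_gem_fence_array_add() takes ownership of the fence
reference, so the producer passes an extra get):

	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

	ret = drm_gem_fence_array_add(&job->deps, dma_fence_get(prereq));

The .dependency hook then hands those fences back one at a time via
xa_erase(&job->deps, job->last_dep++) until the array is empty.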
-static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
+static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
- struct v3d_job *job = to_v3d_job(sched_job);
- struct v3d_exec_info *exec = job->exec;
- enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
- struct v3d_dev *v3d = exec->v3d;
+ struct v3d_bin_job *job = to_bin_job(sched_job);
+ struct v3d_dev *v3d = job->base.v3d;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
unsigned long irqflags;
- if (unlikely(job->base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error))
return NULL;
/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
*/
spin_lock_irqsave(&v3d->job_lock, irqflags);
- if (q == V3D_BIN) {
- v3d->bin_job = job->exec;
-
- /* Clear out the overflow allocation, so we don't
- * reuse the overflow attached to a previous job.
- */
- V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
- } else {
- v3d->render_job = job->exec;
- }
+ v3d->bin_job = job;
+ /* Clear out the overflow allocation, so we don't
+ * reuse the overflow attached to a previous job.
+ */
+ V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
- /* Can we avoid this flush when q==RENDER? We need to be
- * careful of scheduling, though -- imagine job0 rendering to
- * texture and job1 reading, and them being executed as bin0,
- * bin1, render0, render1, so that render1's flush at bin time
+ v3d_invalidate_caches(v3d);
+
+ fence = v3d_fence_create(v3d, V3D_BIN);
+ if (IS_ERR(fence))
+ return NULL;
+
+ if (job->base.irq_fence)
+ dma_fence_put(job->base.irq_fence);
+ job->base.irq_fence = dma_fence_get(fence);
+
+ trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
+ job->start, job->end);
+
+ /* Set the current and end address of the control list.
+ * Writing the end register is what starts the job.
+ */
+ if (job->qma) {
+ V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
+ V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
+ }
+ if (job->qts) {
+ V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
+ V3D_CLE_CT0QTS_ENABLE |
+ job->qts);
+ }
+ V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
+ V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);
+
+ return fence;
+}
+
+static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
+{
+ struct v3d_render_job *job = to_render_job(sched_job);
+ struct v3d_dev *v3d = job->base.v3d;
+ struct drm_device *dev = &v3d->drm;
+ struct dma_fence *fence;
+
+ if (unlikely(job->base.base.s_fence->finished.error))
+ return NULL;
+
+ v3d->render_job = job;
+
+ /* Can we avoid this flush? We need to be careful of
+ * scheduling, though -- imagine job0 rendering to texture and
+ * job1 reading, and them being executed as bin0, bin1,
+ * render0, render1, so that render1's flush at bin time
* wasn't enough.
*/
v3d_invalidate_caches(v3d);
- fence = v3d_fence_create(v3d, q);
+ fence = v3d_fence_create(v3d, V3D_RENDER);
if (IS_ERR(fence))
return NULL;
- if (job->irq_fence)
- dma_fence_put(job->irq_fence);
- job->irq_fence = dma_fence_get(fence);
+ if (job->base.irq_fence)
+ dma_fence_put(job->base.irq_fence);
+ job->base.irq_fence = dma_fence_get(fence);
- trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno,
+ trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
job->start, job->end);
- if (q == V3D_BIN) {
- if (exec->qma) {
- V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, exec->qma);
- V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, exec->qms);
- }
- if (exec->qts) {
- V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
- V3D_CLE_CT0QTS_ENABLE |
- exec->qts);
- }
- } else {
- /* XXX: Set the QCFG */
- }
+ /* XXX: Set the QCFG */
/* Set the current and end address of the control list.
* Writing the end register is what starts the job.
*/
- V3D_CORE_WRITE(0, V3D_CLE_CTNQBA(q), job->start);
- V3D_CORE_WRITE(0, V3D_CLE_CTNQEA(q), job->end);
+ V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
+ V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);
return fence;
}
@@ -190,7 +184,7 @@ static struct dma_fence *
v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
struct v3d_tfu_job *job = to_tfu_job(sched_job);
- struct v3d_dev *v3d = job->v3d;
+ struct v3d_dev *v3d = job->base.v3d;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
@@ -199,9 +193,9 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
return NULL;
v3d->tfu_job = job;
- if (job->irq_fence)
- dma_fence_put(job->irq_fence);
- job->irq_fence = dma_fence_get(fence);
+ if (job->base.irq_fence)
+ dma_fence_put(job->base.irq_fence);
+ job->base.irq_fence = dma_fence_get(fence);
trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
@@ -223,6 +217,48 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
return fence;
}
+static struct dma_fence *
+v3d_csd_job_run(struct drm_sched_job *sched_job)
+{
+ struct v3d_csd_job *job = to_csd_job(sched_job);
+ struct v3d_dev *v3d = job->base.v3d;
+ struct drm_device *dev = &v3d->drm;
+ struct dma_fence *fence;
+ int i;
+
+ v3d->csd_job = job;
+
+ v3d_invalidate_caches(v3d);
+
+ fence = v3d_fence_create(v3d, V3D_CSD);
+ if (IS_ERR(fence))
+ return NULL;
+
+ if (job->base.irq_fence)
+ dma_fence_put(job->base.irq_fence);
+ job->base.irq_fence = dma_fence_get(fence);
+
+ trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);
+
+ for (i = 1; i <= 6; i++)
+ V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]);
+ /* CFG0 write kicks off the job. */
+ V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);
+
+ return fence;
+}
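The stride lines up with the defines in v3d_regs.h: V3D_CSD_QUEUED_CFG0
+ 4 * i lands on CFG1 (0x908) through CFG6 (0x91c) for i = 1..6, and the
trailing CFG0 write (0x904) is the one that actually queues the
dispatch, which is why it must come last.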
+
+static struct dma_fence *
+v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
+{
+ struct v3d_job *job = to_v3d_job(sched_job);
+ struct v3d_dev *v3d = job->v3d;
+
+ v3d_clean_caches(v3d);
+
+ return NULL;
+}
+
static void
v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
{
@@ -232,7 +268,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
/* block scheduler */
for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_stop(&v3d->queue[q].sched);
+ drm_sched_stop(&v3d->queue[q].sched, sched_job);
if (sched_job)
drm_sched_increase_karma(sched_job);
@@ -251,25 +287,23 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
mutex_unlock(&v3d->reset_lock);
}
+/* If the current address or return address have changed, then the GPU
+ * has probably made progress and we should delay the reset. This
+ * could fail if the GPU got in an infinite loop in the CL, but that
+ * is pretty unlikely outside of an i-g-t testcase.
+ */
static void
-v3d_job_timedout(struct drm_sched_job *sched_job)
+v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
+ u32 *timedout_ctca, u32 *timedout_ctra)
{
struct v3d_job *job = to_v3d_job(sched_job);
- struct v3d_exec_info *exec = job->exec;
- struct v3d_dev *v3d = exec->v3d;
- enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
- u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
- u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
-
- /* If the current address or return address have changed, then
- * the GPU has probably made progress and we should delay the
- * reset. This could fail if the GPU got in an infinite loop
- * in the CL, but that is pretty unlikely outside of an i-g-t
- * testcase.
- */
- if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
- job->timedout_ctca = ctca;
- job->timedout_ctra = ctra;
+ struct v3d_dev *v3d = job->v3d;
+ u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
+ u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
+
+ if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
+ *timedout_ctca = ctca;
+ *timedout_ctra = ctra;
return;
}
@@ -277,25 +311,82 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
}
static void
-v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
+v3d_bin_job_timedout(struct drm_sched_job *sched_job)
{
- struct v3d_tfu_job *job = to_tfu_job(sched_job);
+ struct v3d_bin_job *job = to_bin_job(sched_job);
+
+ v3d_cl_job_timedout(sched_job, V3D_BIN,
+ &job->timedout_ctca, &job->timedout_ctra);
+}
+
+static void
+v3d_render_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct v3d_render_job *job = to_render_job(sched_job);
+
+ v3d_cl_job_timedout(sched_job, V3D_RENDER,
+ &job->timedout_ctca, &job->timedout_ctra);
+}
+
+static void
+v3d_generic_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct v3d_job *job = to_v3d_job(sched_job);
v3d_gpu_reset_for_timeout(job->v3d, sched_job);
}
-static const struct drm_sched_backend_ops v3d_sched_ops = {
+static void
+v3d_csd_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct v3d_csd_job *job = to_csd_job(sched_job);
+ struct v3d_dev *v3d = job->base.v3d;
+ u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4);
+
+ /* If we've made progress, skip reset and let the timer get
+ * rearmed.
+ */
+ if (job->timedout_batches != batches) {
+ job->timedout_batches = batches;
+ return;
+ }
+
+ v3d_gpu_reset_for_timeout(v3d, sched_job);
+}
+
+static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
.dependency = v3d_job_dependency,
- .run_job = v3d_job_run,
- .timedout_job = v3d_job_timedout,
- .free_job = v3d_job_free
+ .run_job = v3d_bin_job_run,
+ .timedout_job = v3d_bin_job_timedout,
+ .free_job = v3d_job_free,
+};
+
+static const struct drm_sched_backend_ops v3d_render_sched_ops = {
+ .dependency = v3d_job_dependency,
+ .run_job = v3d_render_job_run,
+ .timedout_job = v3d_render_job_timedout,
+ .free_job = v3d_job_free,
};
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
- .dependency = v3d_tfu_job_dependency,
+ .dependency = v3d_job_dependency,
.run_job = v3d_tfu_job_run,
- .timedout_job = v3d_tfu_job_timedout,
- .free_job = v3d_tfu_job_free
+ .timedout_job = v3d_generic_job_timedout,
+ .free_job = v3d_job_free,
+};
+
+static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
+ .dependency = v3d_job_dependency,
+ .run_job = v3d_csd_job_run,
+ .timedout_job = v3d_csd_job_timedout,
+ .free_job = v3d_job_free
+};
+
+static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
+ .dependency = v3d_job_dependency,
+ .run_job = v3d_cache_clean_job_run,
+ .timedout_job = v3d_generic_job_timedout,
+ .free_job = v3d_job_free
};
int
@@ -307,7 +398,7 @@ v3d_sched_init(struct v3d_dev *v3d)
int ret;
ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
- &v3d_sched_ops,
+ &v3d_bin_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms),
"v3d_bin");
@@ -317,14 +408,14 @@ v3d_sched_init(struct v3d_dev *v3d)
}
ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
- &v3d_sched_ops,
+ &v3d_render_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms),
"v3d_render");
if (ret) {
dev_err(v3d->dev, "Failed to create render scheduler: %d.",
ret);
- drm_sched_fini(&v3d->queue[V3D_BIN].sched);
+ v3d_sched_fini(v3d);
return ret;
}
@@ -336,11 +427,36 @@ v3d_sched_init(struct v3d_dev *v3d)
if (ret) {
dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
ret);
- drm_sched_fini(&v3d->queue[V3D_RENDER].sched);
- drm_sched_fini(&v3d->queue[V3D_BIN].sched);
+ v3d_sched_fini(v3d);
return ret;
}
+ if (v3d_has_csd(v3d)) {
+ ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
+ &v3d_csd_sched_ops,
+ hw_jobs_limit, job_hang_limit,
+ msecs_to_jiffies(hang_limit_ms),
+ "v3d_csd");
+ if (ret) {
+ dev_err(v3d->dev, "Failed to create CSD scheduler: %d.",
+ ret);
+ v3d_sched_fini(v3d);
+ return ret;
+ }
+
+ ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
+ &v3d_cache_clean_sched_ops,
+ hw_jobs_limit, job_hang_limit,
+ msecs_to_jiffies(hang_limit_ms),
+ "v3d_cache_clean");
+ if (ret) {
+ dev_err(v3d->dev, "Failed to create CACHE_CLEAN scheduler: %d.",
+ ret);
+ v3d_sched_fini(v3d);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -349,6 +465,8 @@ v3d_sched_fini(struct v3d_dev *v3d)
{
enum v3d_queue q;
- for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_fini(&v3d->queue[q].sched);
+ for (q = 0; q < V3D_MAX_QUEUES; q++) {
+ if (v3d->queue[q].sched.ready)
+ drm_sched_fini(&v3d->queue[q].sched);
+ }
}
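
The error paths above all funnel into v3d_sched_fini(), which is safe on a
partially initialized device because drm_sched_init() sets sched.ready only on
success. A minimal sketch of the resulting init/unwind idiom; my_dev,
MY_NUM_QUEUES, my_ops and my_queue_names are illustrative stand-ins, not
driver symbols:

    static int my_sched_init(struct my_dev *mdev)
    {
            int i, ret;

            for (i = 0; i < MY_NUM_QUEUES; i++) {
                    ret = drm_sched_init(&mdev->queue[i].sched, my_ops[i],
                                         hw_jobs_limit, job_hang_limit,
                                         msecs_to_jiffies(hang_limit_ms),
                                         my_queue_names[i]);
                    if (ret) {
                            /* same body as v3d_sched_fini(): skips queues
                             * whose scheduler never came up */
                            my_sched_fini(mdev);
                            return ret;
                    }
            }
            return 0;
    }
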
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
index edd984afa33f..7aa8dc356e54 100644
--- a/drivers/gpu/drm/v3d/v3d_trace.h
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -124,6 +124,26 @@ TRACE_EVENT(v3d_tfu_irq,
__entry->seqno)
);
+TRACE_EVENT(v3d_csd_irq,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
TRACE_EVENT(v3d_submit_tfu_ioctl,
TP_PROTO(struct drm_device *dev, u32 iia),
TP_ARGS(dev, iia),
@@ -163,6 +183,80 @@ TRACE_EVENT(v3d_submit_tfu,
__entry->seqno)
);
+TRACE_EVENT(v3d_submit_csd_ioctl,
+ TP_PROTO(struct drm_device *dev, u32 cfg5, u32 cfg6),
+ TP_ARGS(dev, cfg5, cfg6),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, cfg5)
+ __field(u32, cfg6)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->cfg5 = cfg5;
+ __entry->cfg6 = cfg6;
+ ),
+
+ TP_printk("dev=%u, CFG5 0x%08x, CFG6 0x%08x",
+ __entry->dev,
+ __entry->cfg5,
+ __entry->cfg6)
+);
+
+TRACE_EVENT(v3d_submit_csd,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
+TRACE_EVENT(v3d_cache_clean_begin,
+ TP_PROTO(struct drm_device *dev),
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ ),
+
+ TP_printk("dev=%u",
+ __entry->dev)
+);
+
+TRACE_EVENT(v3d_cache_clean_end,
+ TP_PROTO(struct drm_device *dev),
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ ),
+
+ TP_printk("dev=%u",
+ __entry->dev)
+);
+
TRACE_EVENT(v3d_reset_begin,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
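
The new CSD events mirror the existing v3d_tfu_irq shape. A hedged sketch of
an emit site; the real call sites live in v3d_irq.c and v3d_gem.c, outside
this hunk, so the interrupt bit name below is an assumption:

    #include "v3d_trace.h"

    /* Hypothetical IRQ-handler excerpt: report the CSD job's fence seqno
     * once the compute dispatch completes. */
    if (intsts & V3D_INT_CSDDONE) {
            struct v3d_fence *fence =
                    to_v3d_fence(v3d->queue[V3D_CSD].irq_fence);

            trace_v3d_csd_irq(&v3d->drm, fence->seqno);
            dma_fence_signal(&fence->base);
    }
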
diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig
index d6ab955c0768..56ba510f21a2 100644
--- a/drivers/gpu/drm/vboxvideo/Kconfig
+++ b/drivers/gpu/drm/vboxvideo/Kconfig
@@ -3,7 +3,7 @@ config DRM_VBOXVIDEO
tristate "Virtual Box Graphics Card"
depends on DRM && X86 && PCI
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_VRAM_HELPER
select GENERIC_ALLOCATOR
help
This is a KMS driver for the virtual Graphics Card used in
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index fb6a0f0b8167..02537ab9cc08 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -191,13 +191,7 @@ static struct pci_driver vbox_pci_driver = {
static const struct file_operations vbox_fops = {
.owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .mmap = vbox_mmap,
- .poll = drm_poll,
- .read = drm_read,
+ DRM_VRAM_MM_FILE_OPERATIONS
};
static struct drm_driver driver = {
@@ -215,9 +209,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object_unlocked = vbox_gem_free_object,
- .dumb_create = vbox_dumb_create,
- .dumb_map_offset = vbox_dumb_mmap_offset,
+ DRM_GEM_VRAM_DRIVER,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
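
For reference, the two helper macros stand in for the removed boilerplate.
Paraphrased from drm_vram_mm_helper.h and drm_gem_vram_helper.h of this
period, not quoted verbatim:

    /* struct file_operations initializers, roughly: */
    #define DRM_VRAM_MM_FILE_OPERATIONS \
            .llseek         = no_llseek, \
            .read           = drm_read, \
            .poll           = drm_poll, \
            .unlocked_ioctl = drm_ioctl, \
            .compat_ioctl   = drm_compat_ioctl, \
            .mmap           = drm_vram_mm_file_operations_mmap, \
            .open           = drm_open, \
            .release        = drm_release

    /* struct drm_driver initializers, roughly: */
    #define DRM_GEM_VRAM_DRIVER \
            .gem_free_object_unlocked = \
                    drm_gem_vram_driver_gem_free_object_unlocked, \
            .dumb_create     = drm_gem_vram_driver_dumb_create, \
            .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset
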
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index ece31f395540..9028f946bc06 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -18,12 +18,9 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_vram_helper.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
+#include <drm/drm_vram_mm_helper.h>
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
@@ -77,10 +74,6 @@ struct vbox_private {
int fb_mtrr;
- struct {
- struct ttm_bo_device bdev;
- } ttm;
-
struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
struct work_struct hotplug_work;
u32 input_mapping_width;
@@ -96,8 +89,6 @@ struct vbox_private {
#undef CURSOR_PIXEL_COUNT
#undef CURSOR_DATA_SIZE
-struct vbox_gem_object;
-
struct vbox_connector {
struct drm_connector base;
char name[32];
@@ -170,74 +161,12 @@ int vboxfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
void vbox_fbdev_fini(struct vbox_private *vbox);
-struct vbox_bo {
- struct ttm_buffer_object bo;
- struct ttm_placement placement;
- struct ttm_bo_kmap_obj kmap;
- struct drm_gem_object gem;
- struct ttm_place placements[3];
- int pin_count;
-};
-
-#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)
-
-static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct vbox_bo, bo);
-}
-
-#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
-
-static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
-{
- return bo->bo.offset;
-}
-
-int vbox_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
-void vbox_gem_free_object(struct drm_gem_object *obj);
-int vbox_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- u32 handle, u64 *offset);
-
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
-int vbox_bo_create(struct vbox_private *vbox, int size, int align,
- u32 flags, struct vbox_bo **pvboxbo);
-
int vbox_gem_create(struct vbox_private *vbox,
u32 size, bool iskernel, struct drm_gem_object **obj);
-int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag);
-int vbox_bo_unpin(struct vbox_bo *bo);
-
-static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
-{
- int ret;
-
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
- if (ret) {
- if (ret != -ERESTARTSYS && ret != -EBUSY)
- DRM_ERROR("reserve failed %p\n", bo);
- return ret;
- }
- return 0;
-}
-
-static inline void vbox_bo_unreserve(struct vbox_bo *bo)
-{
- ttm_bo_unreserve(&bo->bo);
-}
-
-void vbox_ttm_placement(struct vbox_bo *bo, int domain);
-int vbox_bo_push_sysram(struct vbox_bo *bo);
-int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
-void *vbox_bo_kmap(struct vbox_bo *bo);
-void vbox_bo_kunmap(struct vbox_bo *bo);
-
/* vbox_prime.c */
int vbox_gem_prime_pin(struct drm_gem_object *obj);
void vbox_gem_prime_unpin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c
index b724fe7c0c30..8f74bcffc034 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_fb.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_fb.c
@@ -51,9 +51,9 @@ int vboxfb_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct fb_info *info;
struct drm_gem_object *gobj;
- struct vbox_bo *bo;
+ struct drm_gem_vram_object *gbo;
int size, ret;
- u64 gpu_addr;
+ s64 gpu_addr;
u32 pitch;
mode_cmd.width = sizes->surface_width;
@@ -75,9 +75,9 @@ int vboxfb_create(struct drm_fb_helper *helper,
if (ret)
return ret;
- bo = gem_to_vbox_bo(gobj);
+ gbo = drm_gem_vram_of_gem(gobj);
- ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
return ret;
@@ -86,7 +86,7 @@ int vboxfb_create(struct drm_fb_helper *helper,
return PTR_ERR(info);
info->screen_size = size;
- info->screen_base = (char __iomem *)vbox_bo_kmap(bo);
+ info->screen_base = (char __iomem *)drm_gem_vram_kmap(gbo, true, NULL);
if (IS_ERR(info->screen_base))
return PTR_ERR(info->screen_base);
@@ -104,7 +104,9 @@ int vboxfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(info, helper, sizes);
- gpu_addr = vbox_bo_gpu_offset(bo);
+ gpu_addr = drm_gem_vram_offset(gbo);
+ if (gpu_addr < 0)
+ return (int)gpu_addr;
info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
info->fix.smem_len = vbox->available_vram_size - gpu_addr;
@@ -132,12 +134,10 @@ void vbox_fbdev_fini(struct vbox_private *vbox)
drm_fb_helper_unregister_fbi(&vbox->fb_helper);
if (afb->obj) {
- struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(afb->obj);
- vbox_bo_kunmap(bo);
-
- if (bo->pin_count)
- vbox_bo_unpin(bo);
+ drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_unpin(gbo);
drm_gem_object_put_unlocked(afb->obj);
afb->obj = NULL;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index f4d02de5518a..18693e2bf72a 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -274,7 +274,7 @@ void vbox_hw_fini(struct vbox_private *vbox)
int vbox_gem_create(struct vbox_private *vbox,
u32 size, bool iskernel, struct drm_gem_object **obj)
{
- struct vbox_bo *vboxbo;
+ struct drm_gem_vram_object *gbo;
int ret;
*obj = NULL;
@@ -283,79 +283,16 @@ int vbox_gem_create(struct vbox_private *vbox,
if (size == 0)
return -EINVAL;
- ret = vbox_bo_create(vbox, size, 0, 0, &vboxbo);
- if (ret) {
+ gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ddev.vram_mm->bdev,
+ size, 0, false);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
if (ret != -ERESTARTSYS)
DRM_ERROR("failed to allocate GEM object\n");
return ret;
}
- *obj = &vboxbo->gem;
-
- return 0;
-}
-
-int vbox_dumb_create(struct drm_file *file,
- struct drm_device *dev, struct drm_mode_create_dumb *args)
-{
- struct vbox_private *vbox =
- container_of(dev, struct vbox_private, ddev);
- struct drm_gem_object *gobj;
- u32 handle;
- int ret;
-
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
- ret = vbox_gem_create(vbox, args->size, false, &gobj);
- if (ret)
- return ret;
-
- ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_put_unlocked(gobj);
- if (ret)
- return ret;
-
- args->handle = handle;
+ *obj = &gbo->gem;
return 0;
}
-
-void vbox_gem_free_object(struct drm_gem_object *obj)
-{
- struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
-
- ttm_bo_put(&vbox_bo->bo);
-}
-
-static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
-{
- return drm_vma_node_offset_addr(&bo->bo.vma_node);
-}
-
-int
-vbox_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- u32 handle, u64 *offset)
-{
- struct drm_gem_object *obj;
- int ret;
- struct vbox_bo *bo;
-
- mutex_lock(&dev->struct_mutex);
- obj = drm_gem_object_lookup(file, handle);
- if (!obj) {
- ret = -ENOENT;
- goto out_unlock;
- }
-
- bo = gem_to_vbox_bo(obj);
- *offset = vbox_bo_mmap_offset(bo);
-
- drm_gem_object_put(obj);
- ret = 0;
-
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 58cea131470e..e1e48ba919eb 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -172,7 +172,8 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
{
- struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo =
+ drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
struct vbox_private *vbox = crtc->dev->dev_private;
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
@@ -186,7 +187,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
vbox_crtc->x = x;
vbox_crtc->y = y;
- vbox_crtc->fb_offset = vbox_bo_gpu_offset(bo);
+ vbox_crtc->fb_offset = drm_gem_vram_offset(gbo);
/* vbox_do_modeset() checks vbox->single_framebuffer so update it now */
if (needs_modeset && vbox_set_up_input_mapping(vbox)) {
@@ -302,14 +303,14 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane,
static int vbox_primary_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct vbox_bo *bo;
+ struct drm_gem_vram_object *gbo;
int ret;
if (!new_state->fb)
return 0;
- bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj);
- ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
+ gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
@@ -319,13 +320,13 @@ static int vbox_primary_prepare_fb(struct drm_plane *plane,
static void vbox_primary_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct vbox_bo *bo;
+ struct drm_gem_vram_object *gbo;
if (!old_state->fb)
return;
- bo = gem_to_vbox_bo(to_vbox_framebuffer(old_state->fb)->obj);
- vbox_bo_unpin(bo);
+ gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(old_state->fb)->obj);
+ drm_gem_vram_unpin(gbo);
}
static int vbox_cursor_atomic_check(struct drm_plane *plane,
@@ -385,7 +386,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
container_of(plane->dev, struct vbox_private, ddev);
struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
struct drm_framebuffer *fb = plane->state->fb;
- struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo =
+ drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
u32 width = plane->state->crtc_w;
u32 height = plane->state->crtc_h;
size_t data_size, mask_size;
@@ -404,7 +406,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
vbox_crtc->cursor_enabled = true;
/* pinning is done in prepare/cleanup framebuffer */
- src = vbox_bo_kmap(bo);
+ src = drm_gem_vram_kmap(gbo, true, NULL);
if (IS_ERR(src)) {
mutex_unlock(&vbox->hw_mutex);
DRM_WARN("Could not kmap cursor bo, skipping update\n");
@@ -420,7 +422,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
data_size = width * height * 4 + mask_size;
copy_cursor_image(src, vbox->cursor_data, width, height, mask_size);
- vbox_bo_kunmap(bo);
+ drm_gem_vram_kunmap(gbo);
flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
VBOX_MOUSE_POINTER_ALPHA;
@@ -460,25 +462,25 @@ static void vbox_cursor_atomic_disable(struct drm_plane *plane,
static int vbox_cursor_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct vbox_bo *bo;
+ struct drm_gem_vram_object *gbo;
if (!new_state->fb)
return 0;
- bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj);
- return vbox_bo_pin(bo, TTM_PL_FLAG_SYSTEM);
+ gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
+ return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
}
static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct vbox_bo *bo;
+ struct drm_gem_vram_object *gbo;
if (!plane->state->fb)
return;
- bo = gem_to_vbox_bo(to_vbox_framebuffer(plane->state->fb)->obj);
- vbox_bo_unpin(bo);
+ gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(plane->state->fb)->obj);
+ drm_gem_vram_unpin(gbo);
}
static const u32 vbox_cursor_plane_formats[] = {
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index 9d78438c2877..b82595a9ed0f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -8,167 +8,23 @@
*/
#include <linux/pci.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include "vbox_drv.h"
-static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
-{
- return container_of(bd, struct vbox_private, ttm.bdev);
-}
-
-static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
-{
- struct vbox_bo *bo;
-
- bo = container_of(tbo, struct vbox_bo, bo);
-
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
-{
- if (bo->destroy == &vbox_bo_ttm_destroy)
- return true;
-
- return false;
-}
-
-static int
-vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void
-vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
-{
- struct vbox_bo *vboxbo = vbox_bo(bo);
-
- if (!vbox_ttm_bo_is_vbox_bo(bo))
- return;
-
- vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
- *pl = vboxbo->placement;
-}
-
-static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- return 0;
-}
-
-static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct vbox_private *vbox = vbox_bdev(bdev);
-
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- /* system memory */
- return 0;
- case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = pci_resource_start(vbox->ddev.pdev, 0);
- mem->bus.is_iomem = true;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
-}
-
-static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
-{
- ttm_tt_fini(tt);
- kfree(tt);
-}
-
-static struct ttm_backend_func vbox_tt_backend_func = {
- .destroy = &vbox_ttm_backend_destroy,
-};
-
-static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
- u32 page_flags)
-{
- struct ttm_tt *tt;
-
- tt = kzalloc(sizeof(*tt), GFP_KERNEL);
- if (!tt)
- return NULL;
-
- tt->func = &vbox_tt_backend_func;
- if (ttm_tt_init(tt, bo, page_flags)) {
- kfree(tt);
- return NULL;
- }
-
- return tt;
-}
-
-static struct ttm_bo_driver vbox_bo_driver = {
- .ttm_tt_create = vbox_ttm_tt_create,
- .init_mem_type = vbox_bo_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = vbox_bo_evict_flags,
- .verify_access = vbox_bo_verify_access,
- .io_mem_reserve = &vbox_ttm_io_mem_reserve,
- .io_mem_free = &vbox_ttm_io_mem_free,
-};
-
int vbox_mm_init(struct vbox_private *vbox)
{
+ struct drm_vram_mm *vmm;
int ret;
struct drm_device *dev = &vbox->ddev;
- struct ttm_bo_device *bdev = &vbox->ttm.bdev;
- ret = ttm_bo_device_init(&vbox->ttm.bdev,
- &vbox_bo_driver,
- dev->anon_inode->i_mapping,
- true);
- if (ret) {
- DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
+ vbox->available_vram_size,
+ &drm_gem_vram_mm_funcs);
+ if (IS_ERR(vmm)) {
+ ret = PTR_ERR(vmm);
+ DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
return ret;
}
- ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
- vbox->available_vram_size >> PAGE_SHIFT);
- if (ret) {
- DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
- goto err_device_release;
- }
-
#ifdef DRM_MTRR_WC
vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0),
@@ -178,10 +34,6 @@ int vbox_mm_init(struct vbox_private *vbox)
pci_resource_len(dev->pdev, 0));
#endif
return 0;
-
-err_device_release:
- ttm_bo_device_release(&vbox->ttm.bdev);
- return ret;
}
void vbox_mm_fini(struct vbox_private *vbox)
@@ -193,196 +45,5 @@ void vbox_mm_fini(struct vbox_private *vbox)
#else
arch_phys_wc_del(vbox->fb_mtrr);
#endif
- ttm_bo_device_release(&vbox->ttm.bdev);
-}
-
-void vbox_ttm_placement(struct vbox_bo *bo, int domain)
-{
- unsigned int i;
- u32 c = 0;
-
- bo->placement.placement = bo->placements;
- bo->placement.busy_placement = bo->placements;
-
- if (domain & TTM_PL_FLAG_VRAM)
- bo->placements[c++].flags =
- TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
- if (domain & TTM_PL_FLAG_SYSTEM)
- bo->placements[c++].flags =
- TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
- if (!c)
- bo->placements[c++].flags =
- TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
-
- bo->placement.num_placement = c;
- bo->placement.num_busy_placement = c;
-
- for (i = 0; i < c; ++i) {
- bo->placements[i].fpfn = 0;
- bo->placements[i].lpfn = 0;
- }
-}
-
-int vbox_bo_create(struct vbox_private *vbox, int size, int align,
- u32 flags, struct vbox_bo **pvboxbo)
-{
- struct vbox_bo *vboxbo;
- size_t acc_size;
- int ret;
-
- vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
- if (!vboxbo)
- return -ENOMEM;
-
- ret = drm_gem_object_init(&vbox->ddev, &vboxbo->gem, size);
- if (ret)
- goto err_free_vboxbo;
-
- vboxbo->bo.bdev = &vbox->ttm.bdev;
-
- vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
-
- acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
- sizeof(struct vbox_bo));
-
- ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
- ttm_bo_type_device, &vboxbo->placement,
- align >> PAGE_SHIFT, false, acc_size,
- NULL, NULL, vbox_bo_ttm_destroy);
- if (ret)
- goto err_free_vboxbo;
-
- *pvboxbo = vboxbo;
-
- return 0;
-
-err_free_vboxbo:
- kfree(vboxbo);
- return ret;
-}
-
-int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- if (bo->pin_count) {
- bo->pin_count++;
- return 0;
- }
-
- ret = vbox_bo_reserve(bo, false);
- if (ret)
- return ret;
-
- vbox_ttm_placement(bo, pl_flag);
-
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret == 0)
- bo->pin_count = 1;
-
- vbox_bo_unreserve(bo);
-
- return ret;
-}
-
-int vbox_bo_unpin(struct vbox_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
-
- ret = vbox_bo_reserve(bo, false);
- if (ret) {
- DRM_ERROR("Error %d reserving bo, leaving it pinned\n", ret);
- return ret;
- }
-
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
-
- vbox_bo_unreserve(bo);
-
- return ret;
-}
-
-/*
- * Move a vbox-owned buffer object to system memory if no one else has it
- * pinned. The caller must have pinned it previously, and this call will
- * release the caller's pin.
- */
-int vbox_bo_push_sysram(struct vbox_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- if (!bo->pin_count) {
- DRM_ERROR("unpin bad %p\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
-
- if (bo->kmap.virtual) {
- ttm_bo_kunmap(&bo->kmap);
- bo->kmap.virtual = NULL;
- }
-
- vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
-
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-
- ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
- if (ret) {
- DRM_ERROR("pushing to VRAM failed\n");
- return ret;
- }
-
- return 0;
-}
-
-int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct vbox_private *vbox = file_priv->minor->dev->dev_private;
-
- return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
-}
-
-void *vbox_bo_kmap(struct vbox_bo *bo)
-{
- int ret;
-
- if (bo->kmap.virtual)
- return bo->kmap.virtual;
-
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret) {
- DRM_ERROR("Error kmapping bo: %d\n", ret);
- return NULL;
- }
-
- return bo->kmap.virtual;
-}
-
-void vbox_bo_kunmap(struct vbox_bo *bo)
-{
- if (bo->kmap.virtual) {
- ttm_bo_kunmap(&bo->kmap);
- bo->kmap.virtual = NULL;
- }
+ drm_vram_helper_release_mm(&vbox->ddev);
}
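
Taken together, the conversion reduces every vbox buffer to the generic
VRAM-helper lifecycle. A hedged round-trip sketch built only from the calls
used elsewhere in this patch (error checks abbreviated):

    static int example_vram_bo_roundtrip(struct drm_device *dev, size_t size)
    {
            struct drm_gem_vram_object *gbo;
            void *vaddr;
            s64 offset;

            gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
            if (IS_ERR(gbo))
                    return PTR_ERR(gbo);

            drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); /* check ret */
            vaddr = drm_gem_vram_kmap(gbo, true, NULL);       /* map now */
            offset = drm_gem_vram_offset(gbo);                /* < 0 on error */

            /* ... use vaddr and offset ... */

            drm_gem_vram_kunmap(gbo);
            drm_gem_vram_unpin(gbo);
            drm_gem_object_put_unlocked(&gbo->gem);
            return 0;
    }
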
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 88ebd681d7eb..1434bb829267 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -799,13 +799,36 @@ vc4_prime_import_sg_table(struct drm_device *dev,
return obj;
}
+static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
+{
+ int ret;
+
+ if (!vc4->v3d)
+ return -ENODEV;
+
+ if (vc4file->bin_bo_used)
+ return 0;
+
+ ret = vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vc4_create_bo *args = data;
+ struct vc4_file *vc4file = file_priv->driver_priv;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo = NULL;
int ret;
+ ret = vc4_grab_bin_bo(vc4, vc4file);
+ if (ret)
+ return ret;
+
/*
* We can't allocate from the BO cache, because the BOs don't
* get zeroed, and that might leak data between users.
@@ -846,6 +869,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vc4_create_shader_bo *args = data;
+ struct vc4_file *vc4file = file_priv->driver_priv;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo = NULL;
int ret;
@@ -865,6 +890,10 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ ret = vc4_grab_bin_bo(vc4, vc4file);
+ if (ret)
+ return ret;
+
bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -894,7 +923,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
*/
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
- fail:
+fail:
drm_gem_object_put_unlocked(&bo->base.base);
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 6d9be20a32be..0f99ad03614e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -128,8 +128,12 @@ static int vc4_open(struct drm_device *dev, struct drm_file *file)
static void vc4_close(struct drm_device *dev, struct drm_file *file)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file->driver_priv;
+ if (vc4file->bin_bo_used)
+ vc4_v3d_bin_bo_put(vc4);
+
vc4_perfmon_close_file(vc4file);
kfree(vc4file);
}
@@ -274,6 +278,8 @@ static int vc4_drm_bind(struct device *dev)
drm->dev_private = vc4;
INIT_LIST_HEAD(&vc4->debugfs_list);
+ mutex_init(&vc4->bin_bo_lock);
+
ret = vc4_bo_cache_init(drm);
if (ret)
goto dev_put;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 4f13f6262491..9170a24ec5f5 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -216,6 +216,11 @@ struct vc4_dev {
* the minor is available (after drm_dev_register()).
*/
struct list_head debugfs_list;
+
+ /* Mutex for binner bo allocation. */
+ struct mutex bin_bo_lock;
+ /* Reference count for our binner bo. */
+ struct kref bin_bo_kref;
};
static inline struct vc4_dev *
@@ -584,6 +589,11 @@ struct vc4_exec_info {
* NULL otherwise.
*/
struct vc4_perfmon *perfmon;
+
+ /* Whether the exec has taken a reference to the binner BO, which should
+ * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
+ */
+ bool bin_bo_used;
};
/* Per-open file private data. Any driver-specific resource that has to be
@@ -594,6 +604,8 @@ struct vc4_file {
struct idr idr;
struct mutex lock;
} perfmon;
+
+ bool bin_bo_used;
};
static inline struct vc4_exec_info *
@@ -833,6 +845,8 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
+int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
+void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);
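
The new pair implements a lazily created, refcounted singleton: the first
successful get allocates the binner BO and kref_init()s it, later gets
kref_get() it, and the final put frees it, all under bin_bo_lock. A condensed
caller sketch using the helpers defined later in this patch:

    ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
    if (ret)
            return ret;                /* no V3D, or allocation failed */

    /* ... submit work that may bin ... */

    vc4_v3d_bin_bo_put(vc4);           /* balances the get exactly once */
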
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index d9311be32a4f..84795d928f20 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -820,6 +820,7 @@ static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
struct drm_vc4_submit_cl *args = exec->args;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
void *temp = NULL;
void *bin;
int ret = 0;
@@ -918,6 +919,12 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
if (ret)
goto fail;
+ if (exec->found_tile_binning_mode_config_packet) {
+ ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
+ if (ret)
+ goto fail;
+ }
+
/* Block waiting on any previous rendering into the CS's VBO,
* IB, or textures, so that pixels are actually written by the
* time we try to read them.
@@ -966,6 +973,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
vc4->bin_alloc_used &= ~exec->bin_slots;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ /* Release the reference on the binner BO if needed. */
+ if (exec->bin_bo_used)
+ vc4_v3d_bin_bo_put(vc4);
+
/* Release the reference we had on the perf monitor. */
vc4_perfmon_put(exec->perfmon);
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index ffd0a4388752..e226c24e543f 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -59,15 +59,22 @@ vc4_overflow_mem_work(struct work_struct *work)
{
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, overflow_mem_work);
- struct vc4_bo *bo = vc4->bin_bo;
+ struct vc4_bo *bo;
int bin_bo_slot;
struct vc4_exec_info *exec;
unsigned long irqflags;
+ mutex_lock(&vc4->bin_bo_lock);
+
+ if (!vc4->bin_bo)
+ goto complete;
+
+ bo = vc4->bin_bo;
+
bin_bo_slot = vc4_v3d_get_bin_slot(vc4);
if (bin_bo_slot < 0) {
DRM_ERROR("Couldn't allocate binner overflow mem\n");
- return;
+ goto complete;
}
spin_lock_irqsave(&vc4->job_lock, irqflags);
@@ -98,6 +105,9 @@ vc4_overflow_mem_work(struct work_struct *work)
V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
+complete:
+ mutex_unlock(&vc4->bin_bo_lock);
}
static void
@@ -249,8 +259,10 @@ vc4_irq_postinstall(struct drm_device *dev)
if (!vc4->v3d)
return 0;
- /* Enable both the render done and out of memory interrupts. */
- V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
+ /* Enable the render done interrupts. The out-of-memory interrupt is
+ * enabled as soon as we have a binner BO allocated.
+ */
+ V3D_WRITE(V3D_INTENA, V3D_INT_FLDONE | V3D_INT_FRDONE);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index afc80b245ea3..441e06d45c89 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -310,10 +310,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
u32 subpixel_src_mask = (1 << 16) - 1;
- u32 format = fb->format->format;
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
- u32 h_subsample, v_subsample;
+ u32 h_subsample = fb->format->hsub;
+ u32 v_subsample = fb->format->vsub;
int i, ret;
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
@@ -328,9 +328,6 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
if (ret)
return ret;
- h_subsample = drm_format_horz_chroma_subsampling(format);
- v_subsample = drm_format_vert_chroma_subsampling(format);
-
for (i = 0; i < num_planes; i++)
vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
@@ -592,8 +589,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
u32 ctl0_offset = vc4_state->dlist_count;
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
- int num_planes = drm_format_num_planes(format->drm);
- u32 h_subsample, v_subsample;
+ int num_planes = fb->format->num_planes;
+ u32 h_subsample = fb->format->hsub;
+ u32 v_subsample = fb->format->vsub;
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
@@ -623,9 +621,6 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
scl1 = vc4_get_scl_field(state, 0);
}
- h_subsample = drm_format_horz_chroma_subsampling(format->drm);
- v_subsample = drm_format_vert_chroma_subsampling(format->drm);
-
rotation = drm_rotation_simplify(state->rotation,
DRM_MODE_ROTATE_0 |
DRM_MODE_REFLECT_X |
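
The removed drm_format_horz/vert_chroma_subsampling() helpers were thin
table lookups; the same values now come straight from the framebuffer's
format descriptor:

    const struct drm_format_info *info = fb->format;
    int num_planes  = info->num_planes; /* e.g. 3 for YUV420 */
    u32 h_subsample = info->hsub;       /* horizontal chroma subsampling */
    u32 v_subsample = info->vsub;       /* vertical chroma subsampling */
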
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index c8b89a78f9f4..96f91c1b4b6e 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -221,17 +221,18 @@ static const u32 txp_fmts[] = {
};
static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
- struct drm_connector_state *conn_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb;
int i;
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
return 0;
- crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
- conn_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
fb = conn_state->writeback_job->fb;
if (fb->width != crtc_state->mode.hdisplay ||
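
This follows the core change that hands a connector's ->atomic_check the
whole drm_atomic_state, leaving the callback to look up its own new state.
The minimal shape, as a hedged sketch:

    static int example_connector_atomic_check(struct drm_connector *conn,
                                              struct drm_atomic_state *state)
    {
            struct drm_connector_state *conn_state =
                    drm_atomic_get_new_connector_state(state, conn);
            struct drm_crtc_state *crtc_state;

            if (!conn_state->crtc)
                    return 0;       /* connector is being disabled */

            crtc_state = drm_atomic_get_new_crtc_state(state,
                                                       conn_state->crtc);
            /* validate conn_state against crtc_state here */
            return 0;
    }
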
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index a4b6859e3af6..0533646a4d13 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -213,7 +213,7 @@ try_again:
}
/**
- * vc4_allocate_bin_bo() - allocates the memory that will be used for
+ * bin_bo_alloc() - allocates the memory that will be used for
* tile binning.
*
* The binner has a limitation that the addresses in the tile state
@@ -234,14 +234,16 @@ try_again:
* overall CMA pool before they make scenes complicated enough to run
* out of bin space.
*/
-static int vc4_allocate_bin_bo(struct drm_device *drm)
+static int bin_bo_alloc(struct vc4_dev *vc4)
{
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_v3d *v3d = vc4->v3d;
uint32_t size = 16 * 1024 * 1024;
int ret = 0;
struct list_head list;
+ if (!v3d)
+ return -ENODEV;
+
/* We may need to try allocating more than once to get a BO
* that doesn't cross 256MB. Track the ones we've allocated
* that failed so far, so that we can free them when we've got
@@ -251,7 +253,7 @@ static int vc4_allocate_bin_bo(struct drm_device *drm)
INIT_LIST_HEAD(&list);
while (true) {
- struct vc4_bo *bo = vc4_bo_create(drm, size, true,
+ struct vc4_bo *bo = vc4_bo_create(vc4->dev, size, true,
VC4_BO_TYPE_BIN);
if (IS_ERR(bo)) {
@@ -292,6 +294,14 @@ static int vc4_allocate_bin_bo(struct drm_device *drm)
WARN_ON_ONCE(sizeof(vc4->bin_alloc_used) * 8 !=
bo->base.base.size / vc4->bin_alloc_size);
+ kref_init(&vc4->bin_bo_kref);
+
+ /* Enable the out-of-memory interrupt so the overflow worker can
+ * start handing out space from our newly-allocated binner BO,
+ * potentially servicing an already-pending-but-masked interrupt.
+ */
+ V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
+
break;
}
@@ -311,6 +321,47 @@ static int vc4_allocate_bin_bo(struct drm_device *drm)
return ret;
}
+int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
+{
+ int ret = 0;
+
+ mutex_lock(&vc4->bin_bo_lock);
+
+ if (used && *used)
+ goto complete;
+
+ if (vc4->bin_bo)
+ kref_get(&vc4->bin_bo_kref);
+ else
+ ret = bin_bo_alloc(vc4);
+
+ if (ret == 0 && used)
+ *used = true;
+
+complete:
+ mutex_unlock(&vc4->bin_bo_lock);
+
+ return ret;
+}
+
+static void bin_bo_release(struct kref *ref)
+{
+ struct vc4_dev *vc4 = container_of(ref, struct vc4_dev, bin_bo_kref);
+
+ if (WARN_ON_ONCE(!vc4->bin_bo))
+ return;
+
+ drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
+ vc4->bin_bo = NULL;
+}
+
+void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
+{
+ mutex_lock(&vc4->bin_bo_lock);
+ kref_put(&vc4->bin_bo_kref, bin_bo_release);
+ mutex_unlock(&vc4->bin_bo_lock);
+}
+
#ifdef CONFIG_PM
static int vc4_v3d_runtime_suspend(struct device *dev)
{
@@ -319,9 +370,6 @@ static int vc4_v3d_runtime_suspend(struct device *dev)
vc4_irq_uninstall(vc4->dev);
- drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
- vc4->bin_bo = NULL;
-
clk_disable_unprepare(v3d->clk);
return 0;
@@ -333,10 +381,6 @@ static int vc4_v3d_runtime_resume(struct device *dev)
struct vc4_dev *vc4 = v3d->vc4;
int ret;
- ret = vc4_allocate_bin_bo(vc4->dev);
- if (ret)
- return ret;
-
ret = clk_prepare_enable(v3d->clk);
if (ret != 0)
return ret;
@@ -403,12 +447,6 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
if (ret != 0)
return ret;
- ret = vc4_allocate_bin_bo(drm);
- if (ret) {
- clk_disable_unprepare(v3d->clk);
- return ret;
- }
-
/* Reset the binner overflow address/size at setup, to be sure
* we don't reuse an old one.
*/
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 4e90cc8fa651..458e606a936f 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -4,8 +4,8 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
- virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
+ virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
- virtgpu_ioctl.o virtgpu_prime.o
+ virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 86843a4d6102..ba16e8cb7124 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_damage_helper.h>
#define XRES_MIN 32
#define YRES_MIN 32
@@ -49,23 +50,10 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
-static int
-virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int flags, unsigned int color,
- struct drm_clip_rect *clips,
- unsigned int num_clips)
-{
- struct virtio_gpu_framebuffer *virtio_gpu_fb
- = to_virtio_gpu_framebuffer(fb);
-
- return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
-}
-
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
- .dirty = virtio_gpu_framebuffer_surface_dirty,
+ .dirty = drm_atomic_helper_dirtyfb,
};
int
@@ -85,10 +73,6 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
vgfb->base.obj[0] = NULL;
return ret;
}
-
- spin_lock_init(&vgfb->dirty_lock);
- vgfb->x1 = vgfb->y1 = INT_MAX;
- vgfb->x2 = vgfb->y2 = 0;
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index b69ae10ca238..9e2d3062b01d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -102,7 +102,6 @@ struct virtio_gpu_fence {
struct dma_fence f;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
- uint64_t seq;
};
#define to_virtio_fence(x) \
container_of(x, struct virtio_gpu_fence, f)
@@ -143,9 +142,6 @@ struct virtio_gpu_output {
struct virtio_gpu_framebuffer {
struct drm_framebuffer base;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
- uint32_t hw_res_handle;
struct virtio_gpu_fence *fence;
};
#define to_virtio_gpu_framebuffer(x) \
@@ -255,10 +251,6 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-/* virtio_fb */
-int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
- struct drm_clip_rect *clips,
- unsigned int num_clips);
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
@@ -356,7 +348,7 @@ int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
-int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
+void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence);
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
deleted file mode 100644
index b07584b1c2bf..000000000000
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2015 Red Hat, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_fb_helper.h>
-#include "virtgpu_drv.h"
-
-static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
- bool store, int x, int y,
- int width, int height)
-{
- struct drm_device *dev = fb->base.dev;
- struct virtio_gpu_device *vgdev = dev->dev_private;
- bool store_for_later = false;
- int bpp = fb->base.format->cpp[0];
- int x2, y2;
- unsigned long flags;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
-
- if ((width <= 0) ||
- (x + width > fb->base.width) ||
- (y + height > fb->base.height)) {
- DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n",
- width, height, x, y,
- fb->base.width, fb->base.height);
- return -EINVAL;
- }
-
- /*
- * Can be called with pretty much any context (console output
- * path). If we are in atomic just store the dirty rect info
- * to send out the update later.
- *
- * Can't test inside spin lock.
- */
- if (in_atomic() || store)
- store_for_later = true;
-
- x2 = x + width - 1;
- y2 = y + height - 1;
-
- spin_lock_irqsave(&fb->dirty_lock, flags);
-
- if (fb->y1 < y)
- y = fb->y1;
- if (fb->y2 > y2)
- y2 = fb->y2;
- if (fb->x1 < x)
- x = fb->x1;
- if (fb->x2 > x2)
- x2 = fb->x2;
-
- if (store_for_later) {
- fb->x1 = x;
- fb->x2 = x2;
- fb->y1 = y;
- fb->y2 = y2;
- spin_unlock_irqrestore(&fb->dirty_lock, flags);
- return 0;
- }
-
- fb->x1 = fb->y1 = INT_MAX;
- fb->x2 = fb->y2 = 0;
-
- spin_unlock_irqrestore(&fb->dirty_lock, flags);
-
- {
- uint32_t offset;
- uint32_t w = x2 - x + 1;
- uint32_t h = y2 - y + 1;
-
- offset = (y * fb->base.pitches[0]) + x * bpp;
-
- virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj,
- offset,
- cpu_to_le32(w),
- cpu_to_le32(h),
- cpu_to_le32(x),
- cpu_to_le32(y),
- NULL);
-
- }
- virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
- x, y, x2 - x + 1, y2 - y + 1);
- return 0;
-}
-
-int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
- struct drm_clip_rect *clips,
- unsigned int num_clips)
-{
- struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
- struct drm_clip_rect norect;
- struct drm_clip_rect *clips_ptr;
- int left, right, top, bottom;
- int i;
- int inc = 1;
-
- if (!num_clips) {
- num_clips = 1;
- clips = &norect;
- norect.x1 = norect.y1 = 0;
- norect.x2 = vgfb->base.width;
- norect.y2 = vgfb->base.height;
- }
- left = clips->x1;
- right = clips->x2;
- top = clips->y1;
- bottom = clips->y2;
-
- /* skip the first clip rect */
- for (i = 1, clips_ptr = clips + inc;
- i < num_clips; i++, clips_ptr += inc) {
- left = min_t(int, left, (int)clips_ptr->x1);
- right = max_t(int, right, (int)clips_ptr->x2);
- top = min_t(int, top, (int)clips_ptr->y1);
- bottom = max_t(int, bottom, (int)clips_ptr->y2);
- }
-
- if (obj->dumb)
- return virtio_gpu_dirty_update(vgfb, false, left, top,
- right - left, bottom - top);
-
- virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
- left, top, right - left, bottom - top);
- return 0;
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 87d1966192f4..70d6c4329778 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
+#include <trace/events/dma_fence.h>
#include "virtgpu_drv.h"
static const char *virtio_get_driver_name(struct dma_fence *f)
@@ -40,16 +41,14 @@ bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
- if (atomic64_read(&fence->drv->last_seq) >= fence->seq)
+ if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
return true;
return false;
}
static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
- struct virtio_gpu_fence *fence = to_virtio_fence(f);
-
- snprintf(str, size, "%llu", fence->seq);
+ snprintf(str, size, "%llu", f->seqno);
}
static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
@@ -71,17 +70,22 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!fence)
return fence;
fence->drv = drv;
+
+ /* This only partially initializes the fence because the seqno is
+ * not yet known. The fence must not be used outside of the driver
+ * until virtio_gpu_fence_emit is called.
+ */
dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
return fence;
}
-int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
+void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence)
{
@@ -89,14 +93,15 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
- fence->seq = ++drv->sync_seq;
+ fence->f.seqno = ++drv->sync_seq;
dma_fence_get(&fence->f);
list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
+ trace_dma_fence_emit(&fence->f);
+
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
- cmd_hdr->fence_id = cpu_to_le64(fence->seq);
- return 0;
+ cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
@@ -109,7 +114,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
spin_lock_irqsave(&drv->lock, irq_flags);
atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
- if (last_seq < fence->seq)
+ if (last_seq < fence->f.seqno)
continue;
dma_fence_signal_locked(&fence->f);
list_del(&fence->node);
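
Fence creation is now two-phase: virtio_gpu_fence_alloc() leaves f.seqno at
0, and virtio_gpu_fence_emit() assigns it under drv->lock, which is also why
emit can no longer fail. A caller's-eye sketch:

    struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

    if (!fence)
            return -ENOMEM;
    /* ... build the command; the fence is not yet usable ... */
    virtio_gpu_fence_emit(vgdev, cmd_hdr, fence); /* seqno valid from here */
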
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 949a264985fc..ac60be9b5c19 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -63,7 +63,7 @@ int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
struct virtio_gpu_object *qobj;
int ret;
- ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
+ ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
if (ret != 0)
return ret;
@@ -168,7 +168,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unused_fd;
}
- user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
+ user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
if (copy_from_user(bo_handles, user_bo_handles,
exbuf->num_bo_handles * sizeof(uint32_t))) {
ret = -EFAULT;
@@ -195,8 +195,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out_free;
- buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
- exbuf->size);
+ buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_unresv;
@@ -263,10 +262,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
default:
return -EINVAL;
}
- if (copy_to_user((void __user *)(unsigned long)param->value,
- &value, sizeof(int))) {
+ if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
return -EFAULT;
- }
+
return 0;
}
@@ -526,7 +524,6 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
if (cache_ent->id == args->cap_set_id &&
cache_ent->version == args->cap_set_ver) {
- ptr = cache_ent->caps_cache;
spin_unlock(&vgdev->display_info_lock);
goto copy_exit;
}
@@ -537,15 +534,18 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
&cache_ent);
+copy_exit:
ret = wait_event_timeout(vgdev->resp_wq,
atomic_read(&cache_ent->is_valid), 5 * HZ);
if (!ret)
return -EBUSY;
+ /* The is_valid check must happen before the copy of the cache entry. */
+ smp_rmb();
+
ptr = cache_ent->caps_cache;
-copy_exit:
- if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
+ if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
return -EFAULT;
return 0;
@@ -553,34 +553,34 @@ copy_exit:
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
virtio_gpu_resource_create_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_RENDER_ALLOW),
/* make transfer async to the main ring? - not sure, can we
* thread these in the underlying GL
*/
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
virtio_gpu_transfer_from_host_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
virtio_gpu_transfer_to_host_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_AUTH | DRM_RENDER_ALLOW),
};
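
The smp_rmb() added here pairs with the smp_wmb() added in virtgpu_vq.c; a
comment-style summary of the ordering the pair enforces:

    /*
     * writer (virtio_gpu_cmd_capset_cb)    reader (virtio_gpu_get_caps_ioctl)
     * ----------------------------------   ----------------------------------
     * memcpy(ent->caps_cache, data, sz);   wait_event_timeout(.., is_valid);
     * smp_wmb();                           smp_rmb();
     * atomic_set(&ent->is_valid, 1);       copy_to_user(.., ent->caps_cache);
     *
     * The wmb orders the cache fill before the flag store, the rmb orders
     * the flag load before the data load, so a woken reader can never copy
     * a partially written capset.
     */
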
diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h
new file mode 100644
index 000000000000..711ecc2bd241
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_trace.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_VIRTGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VIRTGPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM virtio_gpu
+#define TRACE_INCLUDE_FILE virtgpu_trace
+
+DECLARE_EVENT_CLASS(virtio_gpu_cmd,
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
+ TP_ARGS(vq, hdr),
+ TP_STRUCT__entry(
+ __field(int, dev)
+ __field(unsigned int, vq)
+ __field(const char *, name)
+ __field(u32, type)
+ __field(u32, flags)
+ __field(u64, fence_id)
+ __field(u32, ctx_id)
+ ),
+ TP_fast_assign(
+ __entry->dev = vq->vdev->index;
+ __entry->vq = vq->index;
+ __entry->name = vq->name;
+ __entry->type = le32_to_cpu(hdr->type);
+ __entry->flags = le32_to_cpu(hdr->flags);
+ __entry->fence_id = le64_to_cpu(hdr->fence_id);
+ __entry->ctx_id = le32_to_cpu(hdr->ctx_id);
+ ),
+ TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u",
+ __entry->dev, __entry->vq, __entry->name,
+ __entry->type, __entry->flags, __entry->fence_id,
+ __entry->ctx_id)
+);
+
+DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_queue,
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
+ TP_ARGS(vq, hdr)
+);
+
+DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_response,
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
+ TP_ARGS(vq, hdr)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/virtio
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/virtio/virtgpu_trace_points.c b/drivers/gpu/drm/virtio/virtgpu_trace_points.c
new file mode 100644
index 000000000000..1970cb6f24ef
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_trace_points.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "virtgpu_drv.h"
+
+#define CREATE_TRACE_POINTS
+#include "virtgpu_trace.h"
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index e62fe24b1a2e..6c1a90717535 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include "virtgpu_drv.h"
+#include "virtgpu_trace.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
@@ -192,6 +193,9 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+
+ trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
struct virtio_gpu_ctrl_hdr *cmd;
@@ -284,6 +288,9 @@ retry:
spin_lock(&vgdev->ctrlq.qlock);
goto retry;
} else {
+ trace_virtio_gpu_cmd_queue(vq,
+ (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+
virtqueue_kick(vq);
}
@@ -359,6 +366,9 @@ retry:
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
+ trace_virtio_gpu_cmd_queue(vq,
+ (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+
virtqueue_kick(vq);
}
@@ -583,12 +593,14 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
cache_ent->id == le32_to_cpu(cmd->capset_id)) {
memcpy(cache_ent->caps_cache, resp->capset_data,
cache_ent->size);
+ /* Copy must occur before is_valid is signalled. */
+ smp_wmb();
atomic_set(&cache_ent->is_valid, 1);
break;
}
}
spin_unlock(&vgdev->display_info_lock);
- wake_up(&vgdev->resp_wq);
+ wake_up_all(&vgdev->resp_wq);
}
static int virtio_get_edid_block(void *data, u8 *buf,
@@ -684,8 +696,11 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf;
int max_size;
struct virtio_gpu_drv_cap_cache *cache_ent;
+ struct virtio_gpu_drv_cap_cache *search_ent;
void *resp_buf;
+ *cache_p = NULL;
+
if (idx >= vgdev->num_capsets)
return -EINVAL;
@@ -716,9 +731,26 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
atomic_set(&cache_ent->is_valid, 0);
cache_ent->size = max_size;
spin_lock(&vgdev->display_info_lock);
- list_add_tail(&cache_ent->head, &vgdev->cap_cache);
+ /* Search while under lock in case it was added by another task. */
+ list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
+ if (search_ent->id == vgdev->capsets[idx].id &&
+ search_ent->version == version) {
+ *cache_p = search_ent;
+ break;
+ }
+ }
+ if (!*cache_p)
+ list_add_tail(&cache_ent->head, &vgdev->cap_cache);
spin_unlock(&vgdev->display_info_lock);
+ if (*cache_p) {
+ /* Entry was found, so free everything that was just created. */
+ kfree(resp_buf);
+ kfree(cache_ent->caps_cache);
+ kfree(cache_ent);
+ return 0;
+ }
+
cmd_p = virtio_gpu_alloc_cmd_resp
(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
sizeof(struct virtio_gpu_resp_capset) + max_size,
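
The capset-cache change is the usual optimistic-allocate, insert-or-discard
pattern for racing lookups. Its generic shape, with hypothetical names
(alloc_entry, find_entry, free_entry are illustrative):

    new = alloc_entry(id, version);            /* allocate outside the lock */
    spin_lock(&cache_lock);
    old = find_entry(&cache, id, version);     /* recheck under the lock */
    if (!old)
            list_add_tail(&new->head, &cache); /* we won the race */
    spin_unlock(&cache_lock);
    if (old) {
            free_entry(new);                   /* lost the race; reuse winner */
            new = old;
    }
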
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index bb66dbcd5e3f..1bbe099b7db8 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -15,6 +15,10 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
spin_lock(&output->lock);
+ ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+ output->period_ns);
+ WARN_ON(ret_overrun != 1);
+
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
@@ -35,10 +39,6 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
DRM_WARN("failed to queue vkms_crc_work_handle");
}
- ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
- output->period_ns);
- WARN_ON(ret_overrun != 1);
-
spin_unlock(&output->lock);
return HRTIMER_RESTART;
@@ -74,33 +74,23 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
{
struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
struct vkms_output *output = &vkmsdev->output;
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
*vblank_time = output->vblank_hrtimer.node.expires;
- if (!in_vblank_irq)
- *vblank_time -= output->period_ns;
-
- return true;
-}
-
-static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
-{
- struct vkms_crtc_state *vkms_state = NULL;
+ if (WARN_ON(*vblank_time == vblank->time))
+ return true;
- if (crtc->state) {
- vkms_state = to_vkms_crtc_state(crtc->state);
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
- kfree(vkms_state);
- crtc->state = NULL;
- }
-
- vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
- if (!vkms_state)
- return;
- INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
+ /*
+ * To prevent races we roll the hrtimer forward before we do any
+ * interrupt processing - this is how real hw works (the interrupt is
+ * only generated after all the vblank registers are updated) and what
+ * the vblank core expects. Therefore we need to always correct the
+ * timestamp by one frame.
+ */
+ *vblank_time -= output->period_ns;
- crtc->state = &vkms_state->base;
- crtc->state->crtc = crtc;
+ return true;
}
static struct drm_crtc_state *
@@ -135,6 +125,19 @@ static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
}
}
+static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
+{
+ struct vkms_crtc_state *vkms_state =
+ kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+
+ if (crtc->state)
+ vkms_atomic_crtc_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
+ if (vkms_state)
+ INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
+}
+
static const struct drm_crtc_funcs vkms_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 3b162b25312e..56fb5c2a2315 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -6,7 +6,6 @@
static void vkms_connector_destroy(struct drm_connector *connector)
{
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -71,12 +70,6 @@ int vkms_output_init(struct vkms_device *vkmsdev)
drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
- ret = drm_connector_register(connector);
- if (ret) {
- DRM_ERROR("Failed to register connector\n");
- goto err_connector_register;
- }
-
ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret) {
@@ -99,9 +92,6 @@ err_attach:
drm_encoder_cleanup(encoder);
err_encoder:
- drm_connector_unregister(connector);
-
-err_connector_register:
drm_connector_cleanup(connector);
err_connector:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 711f8fd0dd45..1d38a8b2f2ec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -464,7 +464,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
+ true);
if (unlikely(ret != 0))
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 523f6ac5c335..1d2322ad6fd5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -169,7 +169,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
- NULL);
+ NULL, true);
}
/**
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 83d236fd893c..706452f9b276 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -199,7 +199,6 @@ static void zx_vl_plane_atomic_update(struct drm_plane *plane,
u32 dst_x, dst_y, dst_w, dst_h;
uint32_t format;
int fmt;
- int num_planes;
int i;
if (!fb)
@@ -218,13 +217,12 @@ static void zx_vl_plane_atomic_update(struct drm_plane *plane,
dst_h = drm_rect_height(dst);
/* Set up data address registers for Y, Cb and Cr planes */
- num_planes = drm_format_num_planes(format);
paddr_reg = layer + VL_Y;
- for (i = 0; i < num_planes; i++) {
+ for (i = 0; i < fb->format->num_planes; i++) {
cma_obj = drm_fb_cma_get_gem_obj(fb, i);
paddr = cma_obj->paddr + fb->offsets[i];
paddr += src_y * fb->pitches[i];
- paddr += src_x * drm_format_plane_cpp(format, i);
+ paddr += src_x * fb->format->cpp[i];
zx_writel(paddr_reg, paddr);
paddr_reg += 4;
}
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index a132c37d7334..a48d810d6ccb 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -133,7 +133,6 @@ static DEFINE_MUTEX(vgasr_mutex);
* @delayed_switch_active: whether a delayed switch is pending
* @delayed_client_id: client to which a delayed switch is pending
* @debugfs_root: directory for vga_switcheroo debugfs interface
- * @switch_file: file for vga_switcheroo debugfs interface
* @registered_clients: number of registered GPUs
* (counting only vga clients, not audio clients)
* @clients: list of registered clients
@@ -152,7 +151,6 @@ struct vgasr_priv {
enum vga_switcheroo_client_id delayed_client_id;
struct dentry *debugfs_root;
- struct dentry *switch_file;
int registered_clients;
struct list_head clients;
@@ -168,7 +166,7 @@ struct vgasr_priv {
#define client_is_vga(c) (!client_is_audio(c))
#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
-static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
+static void vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
/* only one switcheroo per system */
@@ -914,38 +912,20 @@ static const struct file_operations vga_switcheroo_debugfs_fops = {
static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
{
- debugfs_remove(priv->switch_file);
- priv->switch_file = NULL;
-
- debugfs_remove(priv->debugfs_root);
+ debugfs_remove_recursive(priv->debugfs_root);
priv->debugfs_root = NULL;
}
-static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
+static void vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
{
- static const char mp[] = "/sys/kernel/debug";
-
/* already initialised */
if (priv->debugfs_root)
- return 0;
- priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
+ return;
- if (!priv->debugfs_root) {
- pr_err("Cannot create %s/vgaswitcheroo\n", mp);
- goto fail;
- }
+ priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
- priv->switch_file = debugfs_create_file("switch", 0644,
- priv->debugfs_root, NULL,
- &vga_switcheroo_debugfs_fops);
- if (!priv->switch_file) {
- pr_err("cannot create %s/vgaswitcheroo/switch\n", mp);
- goto fail;
- }
- return 0;
-fail:
- vga_switcheroo_debugfs_fini(priv);
- return -1;
+ debugfs_create_file("switch", 0644, priv->debugfs_root, NULL,
+ &vga_switcheroo_debugfs_fops);
}
/**
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index b07000202d4a..417865129407 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -576,7 +576,7 @@ static int mei_hdcp_verify_mprime(struct device *dev,
memcpy(verify_mprime_in.m_prime, stream_ready->m_prime,
HDCP_2_2_MPRIME_LEN);
- drm_hdcp2_u32_to_seq_num(verify_mprime_in.seq_num_m, data->seq_num_m);
+ drm_hdcp_cpu_to_be24(verify_mprime_in.seq_num_m, data->seq_num_m);
memcpy(verify_mprime_in.streams, data->streams,
(data->k * sizeof(struct hdcp2_streamid_type)));
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 799ae49774f5..b939bc28d886 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -650,6 +650,150 @@ hdmi_vendor_any_infoframe_check_only(const union hdmi_vendor_any_infoframe *fram
return 0;
}
+/**
+ * hdmi_drm_infoframe_init() - initialize an HDMI Dynamic Range and
+ * mastering infoframe
+ * @frame: HDMI DRM infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_DRM;
+ frame->version = 1;
+ frame->length = HDMI_DRM_INFOFRAME_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_drm_infoframe_init);
+
+static int hdmi_drm_infoframe_check_only(const struct hdmi_drm_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_DRM ||
+ frame->version != 1)
+ return -EINVAL;
+
+ if (frame->length != HDMI_DRM_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hdmi_drm_infoframe_check() - check an HDMI DRM infoframe
+ * @frame: HDMI DRM infoframe
+ *
+ * Validates that the infoframe is consistent.
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame)
+{
+ return hdmi_drm_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_drm_infoframe_check);
+
+/**
+ * hdmi_drm_infoframe_pack_only() - write HDMI DRM infoframe to binary buffer
+ * @frame: HDMI DRM infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame,
+ void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ size_t length;
+ int i;
+
+ length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(buffer, 0, size);
+
+ ptr[0] = frame->type;
+ ptr[1] = frame->version;
+ ptr[2] = frame->length;
+ ptr[3] = 0; /* checksum */
+
+ /* start infoframe payload */
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ *ptr++ = frame->eotf;
+ *ptr++ = frame->metadata_type;
+
+ for (i = 0; i < 3; i++) {
+ *ptr++ = frame->display_primaries[i].x;
+ *ptr++ = frame->display_primaries[i].x >> 8;
+ *ptr++ = frame->display_primaries[i].y;
+ *ptr++ = frame->display_primaries[i].y >> 8;
+ }
+
+ *ptr++ = frame->white_point.x;
+ *ptr++ = frame->white_point.x >> 8;
+
+ *ptr++ = frame->white_point.y;
+ *ptr++ = frame->white_point.y >> 8;
+
+ *ptr++ = frame->max_display_mastering_luminance;
+ *ptr++ = frame->max_display_mastering_luminance >> 8;
+
+ *ptr++ = frame->min_display_mastering_luminance;
+ *ptr++ = frame->min_display_mastering_luminance >> 8;
+
+ *ptr++ = frame->max_cll;
+ *ptr++ = frame->max_cll >> 8;
+
+ *ptr++ = frame->max_fall;
+ *ptr++ = frame->max_fall >> 8;
+
+ hdmi_infoframe_set_checksum(buffer, length);
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_drm_infoframe_pack_only);
+
+/**
+ * hdmi_drm_infoframe_pack() - check an HDMI DRM infoframe,
+ * and write it to a binary buffer
+ * @frame: HDMI DRM infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_drm_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_drm_infoframe_pack_only(frame, buffer, size);
+}
+EXPORT_SYMBOL(hdmi_drm_infoframe_pack);
+
/*
* hdmi_vendor_any_infoframe_check() - check a vendor infoframe
*/
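
Taken together, init/check/pack(_only) give the usual infoframe life cycle. A minimal usage sketch for a driver that wants to emit HDR metadata; field values are illustrative, and HDMI_EOTF_SMPTE_ST2084 assumes the EOTF enum from the same series:

	struct hdmi_drm_infoframe frame;
	u8 buf[HDMI_INFOFRAME_SIZE(DRM)];
	ssize_t len;

	hdmi_drm_infoframe_init(&frame);
	frame.eotf = HDMI_EOTF_SMPTE_ST2084;	/* illustrative */
	frame.max_cll = 1000;			/* cd/m^2, illustrative */
	frame.max_fall = 400;

	len = hdmi_drm_infoframe_pack(&frame, buf, sizeof(buf));
	if (len < 0)
		return len;
	/* write buf[0..len-1] into the controller's infoframe registers */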
@@ -758,6 +902,10 @@ hdmi_infoframe_pack_only(const union hdmi_infoframe *frame, void *buffer, size_t
length = hdmi_avi_infoframe_pack_only(&frame->avi,
buffer, size);
break;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ length = hdmi_drm_infoframe_pack_only(&frame->drm,
+ buffer, size);
+ break;
case HDMI_INFOFRAME_TYPE_SPD:
length = hdmi_spd_infoframe_pack_only(&frame->spd,
buffer, size);
@@ -806,6 +954,9 @@ hdmi_infoframe_pack(union hdmi_infoframe *frame,
case HDMI_INFOFRAME_TYPE_AVI:
length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
break;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ length = hdmi_drm_infoframe_pack(&frame->drm, buffer, size);
+ break;
case HDMI_INFOFRAME_TYPE_SPD:
length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
break;
@@ -838,6 +989,8 @@ static const char *hdmi_infoframe_type_get_name(enum hdmi_infoframe_type type)
return "Source Product Description (SPD)";
case HDMI_INFOFRAME_TYPE_AUDIO:
return "Audio";
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return "Dynamic Range and Mastering";
}
return "Reserved";
}
@@ -1038,12 +1191,6 @@ hdmi_content_type_get_name(enum hdmi_content_type content_type)
return "Invalid";
}
-/**
- * hdmi_avi_infoframe_log() - log info of HDMI AVI infoframe
- * @level: logging level
- * @dev: device
- * @frame: HDMI AVI infoframe
- */
static void hdmi_avi_infoframe_log(const char *level,
struct device *dev,
const struct hdmi_avi_infoframe *frame)
@@ -1115,12 +1262,6 @@ static const char *hdmi_spd_sdi_get_name(enum hdmi_spd_sdi sdi)
return "Reserved";
}
-/**
- * hdmi_spd_infoframe_log() - log info of HDMI SPD infoframe
- * @level: logging level
- * @dev: device
- * @frame: HDMI SPD infoframe
- */
static void hdmi_spd_infoframe_log(const char *level,
struct device *dev,
const struct hdmi_spd_infoframe *frame)
@@ -1251,12 +1392,6 @@ hdmi_audio_coding_type_ext_get_name(enum hdmi_audio_coding_type_ext ctx)
return "Reserved";
}
-/**
- * hdmi_audio_infoframe_log() - log info of HDMI AUDIO infoframe
- * @level: logging level
- * @dev: device
- * @frame: HDMI AUDIO infoframe
- */
static void hdmi_audio_infoframe_log(const char *level,
struct device *dev,
const struct hdmi_audio_infoframe *frame)
@@ -1284,6 +1419,34 @@ static void hdmi_audio_infoframe_log(const char *level,
frame->downmix_inhibit ? "Yes" : "No");
}
+static void hdmi_drm_infoframe_log(const char *level,
+ struct device *dev,
+ const struct hdmi_drm_infoframe *frame)
+{
+ int i;
+
+ hdmi_infoframe_log_header(level, dev,
+ (struct hdmi_any_infoframe *)frame);
+ hdmi_log("length: %d\n", frame->length);
+ hdmi_log("metadata type: %d\n", frame->metadata_type);
+ hdmi_log("eotf: %d\n", frame->eotf);
+ for (i = 0; i < 3; i++) {
+ hdmi_log("x[%d]: %d\n", i, frame->display_primaries[i].x);
+ hdmi_log("y[%d]: %d\n", i, frame->display_primaries[i].y);
+ }
+
+ hdmi_log("white point x: %d\n", frame->white_point.x);
+ hdmi_log("white point y: %d\n", frame->white_point.y);
+
+ hdmi_log("max_display_mastering_luminance: %d\n",
+ frame->max_display_mastering_luminance);
+ hdmi_log("min_display_mastering_luminance: %d\n",
+ frame->min_display_mastering_luminance);
+
+ hdmi_log("max_cll: %d\n", frame->max_cll);
+ hdmi_log("max_fall: %d\n", frame->max_fall);
+}
+
static const char *
hdmi_3d_structure_get_name(enum hdmi_3d_structure s3d_struct)
{
@@ -1313,12 +1476,6 @@ hdmi_3d_structure_get_name(enum hdmi_3d_structure s3d_struct)
return "Reserved";
}
-/**
- * hdmi_vendor_infoframe_log() - log info of HDMI VENDOR infoframe
- * @level: logging level
- * @dev: device
- * @frame: HDMI VENDOR infoframe
- */
static void
hdmi_vendor_any_infoframe_log(const char *level,
struct device *dev,
@@ -1372,6 +1529,9 @@ void hdmi_infoframe_log(const char *level,
case HDMI_INFOFRAME_TYPE_VENDOR:
hdmi_vendor_any_infoframe_log(level, dev, &frame->vendor);
break;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ hdmi_drm_infoframe_log(level, dev, &frame->drm);
+ break;
}
}
EXPORT_SYMBOL(hdmi_infoframe_log);
@@ -1615,6 +1775,70 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
}
/**
+ * hdmi_drm_infoframe_unpack() - unpack a binary buffer into an HDMI DRM infoframe
+ * @frame: HDMI DRM infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
+ *
+ * Unpacks the information contained in binary @buffer into a structured
+ * @frame of the HDMI Dynamic Range and Mastering (DRM) information frame.
+ * Also verifies the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int hdmi_drm_infoframe_unpack(struct hdmi_drm_infoframe *frame,
+ const void *buffer, size_t size)
+{
+ const u8 *ptr = buffer;
+ const u8 *temp;
+ u8 x_lsb, x_msb;
+ u8 y_lsb, y_msb;
+ int ret;
+ int i;
+
+ if (size < HDMI_INFOFRAME_SIZE(DRM))
+ return -EINVAL;
+
+ if (ptr[0] != HDMI_INFOFRAME_TYPE_DRM ||
+ ptr[1] != 1 ||
+ ptr[2] != HDMI_DRM_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(DRM)) != 0)
+ return -EINVAL;
+
+ ret = hdmi_drm_infoframe_init(frame);
+ if (ret)
+ return ret;
+
+ ptr += HDMI_INFOFRAME_HEADER_SIZE;
+
+ frame->eotf = ptr[0] & 0x7;
+ frame->metadata_type = ptr[1] & 0x7;
+
+ temp = ptr + 2;
+ for (i = 0; i < 3; i++) {
+ x_lsb = *temp++;
+ x_msb = *temp++;
+ frame->display_primaries[i].x = (x_msb << 8) | x_lsb;
+ y_lsb = *temp++;
+ y_msb = *temp++;
+ frame->display_primaries[i].y = (y_msb << 8) | y_lsb;
+ }
+
+ frame->white_point.x = (ptr[15] << 8) | ptr[14];
+ frame->white_point.y = (ptr[17] << 8) | ptr[16];
+
+ frame->max_display_mastering_luminance = (ptr[19] << 8) | ptr[18];
+ frame->min_display_mastering_luminance = (ptr[21] << 8) | ptr[20];
+ frame->max_cll = (ptr[23] << 8) | ptr[22];
+ frame->max_fall = (ptr[25] << 8) | ptr[24];
+
+ return 0;
+}
+
+/**
* hdmi_infoframe_unpack() - unpack binary buffer to a HDMI infoframe
* @frame: HDMI infoframe
* @buffer: source buffer
@@ -1640,6 +1864,9 @@ int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
case HDMI_INFOFRAME_TYPE_AVI:
ret = hdmi_avi_infoframe_unpack(&frame->avi, buffer, size);
break;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ ret = hdmi_drm_infoframe_unpack(&frame->drm, buffer, size);
+ break;
case HDMI_INFOFRAME_TYPE_SPD:
ret = hdmi_spd_infoframe_unpack(&frame->spd, buffer, size);
break;
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index b4ca970a5b75..c402364aec0d 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -150,6 +150,8 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
struct drm_encoder *encoder,
const struct dw_hdmi_plat_data *plat_data);
+void dw_hdmi_resume(struct dw_hdmi *hdmi);
+
void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index 0c33b9e9e0f0..94cc64a342e1 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -9,10 +9,20 @@
#ifndef __DW_MIPI_DSI__
#define __DW_MIPI_DSI__
+#include <linux/types.h>
+
+#include <drm/drm_modes.h>
+
+struct drm_display_mode;
+struct drm_encoder;
struct dw_mipi_dsi;
+struct mipi_dsi_device;
+struct platform_device;
struct dw_mipi_dsi_phy_ops {
int (*init)(void *priv_data);
+ void (*power_on)(void *priv_data);
+ void (*power_off)(void *priv_data);
int (*get_lane_mbps)(void *priv_data,
const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format,
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index e937ff2beb04..927e1205d7aa 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -459,6 +459,13 @@ struct drm_private_state *
drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj);
+struct drm_connector *
+drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder);
+struct drm_connector *
+drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder);
+
/**
* drm_atomic_get_existing_crtc_state - get crtc state, if it exists
* @state: global atomic state object
@@ -950,4 +957,19 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
state->connectors_changed;
}
+/**
+ * drm_atomic_crtc_effectively_active - compute whether crtc is actually active
+ * @state: &drm_crtc_state for the CRTC
+ *
+ * When in self refresh mode, the crtc_state->active value will be false, since
+ * the crtc is off. However, in some cases we're interested in whether the crtc
+ * is active, or effectively active (i.e. it's connected to an active display).
+ * In these cases, use this function instead of just checking active.
+ */
+static inline bool
+drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state)
+{
+ return state->active || state->self_refresh_active;
+}
+
#endif /* DRM_ATOMIC_H_ */
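
A typical consumer is a CRTC atomic_check that must keep clocks or PLLs programmed while the panel self-refreshes, even though state->active is false. A hedged sketch (mydrv names hypothetical):

static int mydrv_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	/*
	 * The panel still scans out from its own buffer during self
	 * refresh, so the clock tree must stay valid in that case too.
	 */
	if (drm_atomic_crtc_effectively_active(state))
		return mydrv_validate_clocks(crtc, state); /* hypothetical */

	return 0;
}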
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 58214be3bf3d..bf4e07141d81 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -117,12 +117,8 @@ int drm_atomic_helper_update_plane(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx);
int drm_atomic_helper_disable_plane(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx);
-int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
- struct drm_plane_state *plane_state);
int drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx);
-int __drm_atomic_helper_set_config(struct drm_mode_set *set,
- struct drm_atomic_state *state);
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index 66c92cbd8e16..4e6d2e7a40b8 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -37,6 +37,8 @@ struct drm_private_state;
struct drm_modeset_acquire_ctx;
struct drm_device;
+void __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index 871008118bab..6bf8b2b78991 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -1,3 +1,6 @@
+#ifndef _DRM_AUTH_H_
+#define _DRM_AUTH_H_
+
/*
* Internal Header for the Direct Rendering Manager
*
@@ -25,8 +28,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _DRM_AUTH_H_
-#define _DRM_AUTH_H_
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/wait.h>
+
+struct drm_file;
+struct drm_hw_lock;
/*
* Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index d4428913a4e1..7616f6562fe4 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -237,6 +237,103 @@ struct drm_bridge_funcs {
* The enable callback is optional.
*/
void (*enable)(struct drm_bridge *bridge);
+
+ /**
+ * @atomic_pre_enable:
+ *
+ * This callback should enable the bridge. It is called right before
+ * the preceding element in the display pipe is enabled. If the
+ * preceding element is a bridge this means it's called before that
+ * bridge's @atomic_pre_enable or @pre_enable function. If the preceding
+ * element is a &drm_encoder it's called right before the encoder's
+ * &drm_encoder_helper_funcs.atomic_enable hook.
+ *
+ * The display pipe (i.e. clocks and timing signals) feeding this bridge
+ * will not yet be running when this callback is called. The bridge must
+ * not enable the display link feeding the next bridge in the chain (if
+ * there is one) when this callback is called.
+ *
+ * Note that this function will only be invoked in the context of an
+ * atomic commit. It will not be invoked from &drm_bridge_pre_enable. It
+ * would be prudent to also provide an implementation of @pre_enable if
+ * you are expecting driver calls into &drm_bridge_pre_enable.
+ *
+ * The @atomic_pre_enable callback is optional.
+ */
+ void (*atomic_pre_enable)(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+
+ /**
+ * @atomic_enable:
+ *
+ * This callback should enable the bridge. It is called right after
+ * the preceding element in the display pipe is enabled. If the
+ * preceding element is a bridge this means it's called after that
+ * bridge's @atomic_enable or @enable function. If the preceding element
+ * is a &drm_encoder it's called right after the encoder's
+ * &drm_encoder_helper_funcs.atomic_enable hook.
+ *
+ * The bridge can assume that the display pipe (i.e. clocks and timing
+ * signals) feeding it is running when this callback is called. This
+ * callback must enable the display link feeding the next bridge in the
+ * chain if there is one.
+ *
+ * Note that this function will only be invoked in the context of an
+ * atomic commit. It will not be invoked from &drm_bridge_enable. It
+ * would be prudent to also provide an implementation of @enable if
+ * you are expecting driver calls into &drm_bridge_enable.
+ *
+ * The @atomic_enable callback is optional.
+ */
+ void (*atomic_enable)(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+
+ /**
+ * @atomic_disable:
+ *
+ * This callback should disable the bridge. It is called right before
+ * the preceding element in the display pipe is disabled. If the
+ * preceding element is a bridge this means it's called before that
+ * bridge's @atomic_disable or @disable vfunc. If the preceding element
+ * is a &drm_encoder it's called right before the
+ * &drm_encoder_helper_funcs.atomic_disable hook.
+ *
+ * The bridge can assume that the display pipe (i.e. clocks and timing
+ * signals) feeding it is still running when this callback is called.
+ *
+ * Note that this function will only be invoked in the context of an
+ * atomic commit. It will not be invoked from &drm_bridge_disable. It
+ * would be prudent to also provide an implementation of @disable if
+ * you are expecting driver calls into &drm_bridge_disable.
+ *
+ * The @atomic_disable callback is optional.
+ */
+ void (*atomic_disable)(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+
+ /**
+ * @atomic_post_disable:
+ *
+ * This callback should disable the bridge. It is called right after the
+ * preceding element in the display pipe is disabled. If the preceding
+ * element is a bridge this means it's called after that bridge's
+ * @atomic_post_disable or @post_disable function. If the preceding
+ * element is a &drm_encoder it's called right after the encoder's
+ * &drm_encoder_helper_funcs.atomic_disable hook.
+ *
+ * The bridge must assume that the display pipe (i.e. clocks and timing
+ * signals) feeding it is no longer running when this callback is
+ * called.
+ *
+ * Note that this function will only be invoked in the context of an
+ * atomic commit. It will not be invoked from &drm_bridge_post_disable.
+ * It would be prudent to also provide an implementation of
+ * @post_disable if you are expecting driver calls into
+ * &drm_bridge_post_disable.
+ *
+ * The @atomic_post_disable callback is optional.
+ */
+ void (*atomic_post_disable)(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
};
/**
@@ -265,6 +362,14 @@ struct drm_bridge_timings {
* input signal after the clock edge.
*/
u32 hold_time_ps;
+ /**
+ * @dual_link:
+ *
+ * True if the bus operates in dual-link mode. The exact meaning is
+ * dependent on the bus type. For LVDS buses, this indicates that even-
+ * and odd-numbered pixels are received on separate links.
+ */
+ bool dual_link;
};
/**
@@ -314,6 +419,15 @@ void drm_bridge_mode_set(struct drm_bridge *bridge,
void drm_bridge_pre_enable(struct drm_bridge *bridge);
void drm_bridge_enable(struct drm_bridge *bridge);
+void drm_atomic_bridge_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+
#ifdef CONFIG_DRM_PANEL_BRIDGE
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
u32 connector_type);
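
Bridges that need connector state at enable time are the main motivation for the atomic variants; with the accessors added to drm_atomic.h above, a bridge can reach its connector from the hook. A hedged sketch (mydrv names hypothetical):

static void mydrv_bridge_atomic_enable(struct drm_bridge *bridge,
				       struct drm_atomic_state *state)
{
	struct drm_connector *connector =
		drm_atomic_get_new_connector_for_encoder(state,
							 bridge->encoder);

	/* program the link using connector->state properties ... */
}

static const struct drm_bridge_funcs mydrv_bridge_funcs = {
	.atomic_enable = mydrv_bridge_atomic_enable,
	/*
	 * Per the kerneldoc above, also wire up .enable if non-atomic
	 * callers may invoke drm_bridge_enable().
	 */
};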
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 268b2cf0052a..f2d5ed745733 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -3,8 +3,13 @@
#ifndef _DRM_CLIENT_H_
#define _DRM_CLIENT_H_
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
#include <linux/types.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+
struct drm_client_dev;
struct drm_device;
struct drm_file;
@@ -85,6 +90,16 @@ struct drm_client_dev {
* @file: DRM file
*/
struct drm_file *file;
+
+ /**
+ * @modeset_mutex: Protects @modesets.
+ */
+ struct mutex modeset_mutex;
+
+ /**
+ * @modesets: CRTC configurations
+ */
+ struct drm_mode_set *modesets;
};
int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
@@ -135,6 +150,37 @@ struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
+int drm_client_modeset_create(struct drm_client_dev *client);
+void drm_client_modeset_free(struct drm_client_dev *client);
+int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height);
+bool drm_client_panel_rotation(struct drm_mode_set *modeset, unsigned int *rotation);
+int drm_client_modeset_commit_force(struct drm_client_dev *client);
+int drm_client_modeset_commit(struct drm_client_dev *client);
+int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
+
+/**
+ * drm_client_for_each_modeset() - Iterate over client modesets
+ * @modeset: &drm_mode_set loop cursor
+ * @client: DRM client
+ */
+#define drm_client_for_each_modeset(modeset, client) \
+ for (({ lockdep_assert_held(&(client)->modeset_mutex); }), \
+ modeset = (client)->modesets; modeset->crtc; modeset++)
+
+/**
+ * drm_client_for_each_connector_iter - connector_list iterator macro
+ * @connector: &struct drm_connector pointer used as cursor
+ * @iter: &struct drm_connector_list_iter
+ *
+ * This iterates the connectors that are usable for internal clients (excludes
+ * writeback connectors).
+ *
+ * For more info see drm_for_each_connector_iter().
+ */
+#define drm_client_for_each_connector_iter(connector, iter) \
+ drm_for_each_connector_iter(connector, iter) \
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+
int drm_client_debugfs_init(struct drm_minor *minor);
#endif
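
The iteration macro asserts that @modeset_mutex is held, so callers take the lock explicitly. A hedged usage sketch (mydrv names hypothetical):

static void mydrv_client_log_modes(struct drm_client_dev *client)
{
	struct drm_mode_set *modeset;

	mutex_lock(&client->modeset_mutex);
	drm_client_for_each_modeset(modeset, client) {
		if (modeset->mode)
			DRM_INFO("%s: %dx%d\n", modeset->crtc->name,
				 modeset->mode->hdisplay,
				 modeset->mode->vdisplay);
	}
	mutex_unlock(&client->modeset_mutex);
}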
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 02a131202add..c6f8486d8b8f 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -517,6 +517,15 @@ struct drm_connector_state {
* Used by the atomic helpers to select the encoder, through the
* &drm_connector_helper_funcs.atomic_best_encoder or
* &drm_connector_helper_funcs.best_encoder callbacks.
+ *
+ * This is also used in the atomic helpers to map encoders to their
+ * current and previous connectors, see
+ * &drm_atomic_get_old_connector_for_encoder() and
+ * &drm_atomic_get_new_connector_for_encoder().
+ *
+ * NOTE: Atomic drivers must fill this out (either themselves or through
+ * helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will
+ * not return correct data to userspace.
*/
struct drm_encoder *best_encoder;
@@ -540,6 +549,20 @@ struct drm_connector_state {
struct drm_tv_connector_state tv;
/**
+ * @self_refresh_aware:
+ *
+ * This tracks whether a connector is aware of the self refresh state.
+ * It should be set to true for those connector implementations which
+ * understand the self refresh state. This is needed since the crtc
+ * registers the self refresh helpers and it doesn't know if the
+ * connectors downstream have implemented self refresh entry/exit.
+ *
+ * Drivers should set this to true in atomic_check if they know how to
+ * handle self_refresh requests.
+ */
+ bool self_refresh_aware;
+
+ /**
* @picture_aspect_ratio: Connector property to control the
* HDMI infoframe aspect ratio setting.
*
@@ -599,6 +622,12 @@ struct drm_connector_state {
* and the connector bpc limitations obtained from edid.
*/
u8 max_bpc;
+
+ /**
+ * @hdr_output_metadata:
+ * DRM blob property for HDR output metadata
+ */
+ struct drm_property_blob *hdr_output_metadata;
};
/**
@@ -1062,12 +1091,6 @@ struct drm_connector {
struct drm_property *vrr_capable_property;
/**
- * @content_protection_property: DRM ENUM property for content
- * protection. See drm_connector_attach_content_protection_property().
- */
- struct drm_property *content_protection_property;
-
- /**
* @colorspace_property: Connector property to set the suitable
* colorspace supported by the sink.
*/
@@ -1239,6 +1262,9 @@ struct drm_connector {
* &drm_mode_config.connector_free_work.
*/
struct llist_node free_node;
+
+ /** @hdr_sink_metadata: HDR Metadata Information read from sink */
+ struct hdr_sink_metadata hdr_sink_metadata;
};
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
@@ -1345,8 +1371,6 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
-int drm_connector_attach_content_protection_property(
- struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
int drm_mode_create_colorspace_property(struct drm_connector *connector);
int drm_mode_create_content_type_property(struct drm_device *dev);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 58ad983d7cd6..128d8b210621 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -39,6 +39,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_modes.h>
#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
#include <drm/drm_property.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
@@ -53,6 +54,7 @@ struct drm_mode_set;
struct drm_file;
struct drm_clip_rect;
struct drm_printer;
+struct drm_self_refresh_data;
struct device_node;
struct dma_fence;
struct edid;
@@ -300,6 +302,17 @@ struct drm_crtc_state {
bool vrr_enabled;
/**
+ * @self_refresh_active:
+ *
+ * Used by the self refresh helpers to denote when a self refresh
+ * transition is occurring. This will be set on enable/disable callbacks
+ * when self refresh is being enabled or disabled. In some cases, it may
+ * not be desirable to fully shut off the crtc during self refresh.
+ * CRTCs can inspect this flag and determine the best course of action.
+ */
+ bool self_refresh_active;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
@@ -1087,6 +1100,13 @@ struct drm_crtc {
* The name of the CRTC's fence timeline.
*/
char timeline_name[32];
+
+ /**
+ * @self_refresh_data: Holds the state for the self refresh helpers
+ *
+ * Initialized via drm_self_refresh_helper_register().
+ */
+ struct drm_self_refresh_data *self_refresh_data;
};
/**
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index ac0f75df1ac9..7501e323d383 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -32,6 +32,8 @@
#ifndef _DRM_DEBUGFS_H_
#define _DRM_DEBUGFS_H_
+#include <linux/types.h>
+#include <linux/seq_file.h>
/**
* struct drm_info_list - debugfs info list entry
*
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 7f9ef709b2b6..1acfc3bbd3fb 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -17,6 +17,7 @@ struct drm_vblank_crtc;
struct drm_sg_mem;
struct drm_local_map;
struct drm_vma_offset_manager;
+struct drm_vram_mm;
struct drm_fb_helper;
struct inode;
@@ -286,6 +287,9 @@ struct drm_device {
/** @vma_offset_manager: GEM information */
struct drm_vma_offset_manager *vma_offset_manager;
+ /** @vram_mm: VRAM MM memory manager */
+ struct drm_vram_mm *vram_mm;
+
/**
* @switch_power_state:
*
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 97ce790a5b5a..3fc534ee8174 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1083,17 +1083,30 @@ struct dp_sdp_header {
#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
-struct edp_vsc_psr {
+/**
+ * struct dp_sdp - DP secondary data packet
+ * @sdp_header: DP secondary data packet header
+ * @db: DP secondary data packet data blocks
+ * VSC SDP Payload for PSR
+ * db[0]: Stereo Interface
+ * db[1]: 0 - PSR State; 1 - Update RFB; 2 - CRC Valid
+ * db[2]: CRC value bits 7:0 of the R or Cr component
+ * db[3]: CRC value bits 15:8 of the R or Cr component
+ * db[4]: CRC value bits 7:0 of the G or Y component
+ * db[5]: CRC value bits 15:8 of the G or Y component
+ * db[6]: CRC value bits 7:0 of the B or Cb component
+ * db[7]: CRC value bits 15:8 of the B or Cb component
+ * db[8] - db[31]: Reserved
+ * VSC SDP Payload for Pixel Encoding/Colorimetry Format
+ * db[0] - db[15]: Reserved
+ * db[16]: Pixel Encoding and Colorimetry Formats
+ * db[17]: Dynamic Range and Component Bit Depth
+ * db[18]: Content Type
+ * db[19] - db[31]: Reserved
+ */
+struct dp_sdp {
struct dp_sdp_header sdp_header;
- u8 DB0; /* Stereo Interface */
- u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
- u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
- u8 DB3; /* CRC value bits 15:8 of the R or Cr component */
- u8 DB4; /* CRC value bits 7:0 of the G or Y component */
- u8 DB5; /* CRC value bits 15:8 of the G or Y component */
- u8 DB6; /* CRC value bits 7:0 of the B or Cb component */
- u8 DB7; /* CRC value bits 15:8 of the B or Cb component */
- u8 DB8_31[24]; /* Reserved */
+ u8 db[32];
} __packed;
#define EDP_VSC_PSR_STATE_ACTIVE (1<<0)
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c9ca0be54d9a..b9719418c3d2 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/hdmi.h>
+#include <drm/drm_mode.h>
struct drm_device;
struct i2c_adapter;
@@ -176,21 +177,23 @@ struct detailed_timing {
#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
#define DRM_EDID_INPUT_DIGITAL (1 << 7)
-#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4)
-#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4)
-#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4)
-#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4)
-#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4)
-#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4)
-#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4)
-#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4)
-#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4)
-#define DRM_EDID_DIGITAL_TYPE_UNDEF (0)
-#define DRM_EDID_DIGITAL_TYPE_DVI (1)
-#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2)
-#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3)
-#define DRM_EDID_DIGITAL_TYPE_MDDI (4)
-#define DRM_EDID_DIGITAL_TYPE_DP (5)
+#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) /* 1.4 */
+#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) /* 1.4 */
+#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) /* 1.4 */
+#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) /* 1.4 */
+#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) /* 1.4 */
+#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) /* 1.4 */
+#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) /* 1.4 */
+#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) /* 1.4 */
+#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) /* 1.4 */
+#define DRM_EDID_DIGITAL_TYPE_MASK (7 << 0) /* 1.4 */
+#define DRM_EDID_DIGITAL_TYPE_UNDEF (0 << 0) /* 1.4 */
+#define DRM_EDID_DIGITAL_TYPE_DVI (1 << 0) /* 1.4 */
+#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2 << 0) /* 1.4 */
+#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3 << 0) /* 1.4 */
+#define DRM_EDID_DIGITAL_TYPE_MDDI (4 << 0) /* 1.4 */
+#define DRM_EDID_DIGITAL_TYPE_DP (5 << 0) /* 1.4 */
+#define DRM_EDID_DIGITAL_DFP_1_X (1 << 0) /* 1.3 */
#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
@@ -370,6 +373,10 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range);
+int
+drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
/**
* drm_eld_mnl - Get ELD monitor name length in bytes.
* @eld: pointer to an eld memory structure with mnl set
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 40af2866c26a..c8a8ae2a678a 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -43,17 +43,6 @@ enum mode_set_atomic {
ENTER_ATOMIC_MODE_SET,
};
-struct drm_fb_offset {
- int x, y;
-};
-
-struct drm_fb_helper_crtc {
- struct drm_mode_set mode_set;
- struct drm_display_mode *desired_mode;
- int x, y;
- int rotation;
-};
-
/**
* struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
* @fb_width: fbdev width
@@ -104,18 +93,10 @@ struct drm_fb_helper_funcs {
struct drm_fb_helper_surface_size *sizes);
};
-struct drm_fb_helper_connector {
- struct drm_connector *connector;
-};
-
/**
* struct drm_fb_helper - main structure to emulate fbdev on top of KMS
* @fb: Scanout framebuffer object
* @dev: DRM device
- * @crtc_count: number of possible CRTCs
- * @crtc_info: per-CRTC helper state (mode, x/y offset, etc)
- * @connector_count: number of connected connectors
- * @connector_info_alloc_count: size of connector_info
* @funcs: driver callbacks for fb helper
* @fbdev: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
@@ -147,24 +128,6 @@ struct drm_fb_helper {
struct drm_framebuffer *fb;
struct drm_device *dev;
- int crtc_count;
- struct drm_fb_helper_crtc *crtc_info;
- int connector_count;
- int connector_info_alloc_count;
- /**
- * @sw_rotations:
- * Bitmask of all rotations requested for panel-orientation which
- * could not be handled in hardware. If only one bit is set
- * fbdev->fbcon_rotate_hint gets set to the requested rotation.
- */
- int sw_rotations;
- /**
- * @connector_info:
- *
- * Array of per-connector information. Do not iterate directly, but use
- * drm_fb_helper_for_each_connector.
- */
- struct drm_fb_helper_connector **connector_info;
const struct drm_fb_helper_funcs *funcs;
struct fb_info *fbdev;
u32 pseudo_palette[17];
@@ -304,18 +267,8 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
-int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
-struct drm_display_mode *
-drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
- int width, int height);
-struct drm_display_mode *
-drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn);
-
-int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
-int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector);
int drm_fb_helper_fbdev_setup(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
@@ -490,12 +443,6 @@ static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
return 0;
}
-static inline int
-drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
-{
- return 0;
-}
-
static inline int drm_fb_helper_debug_enter(struct fb_info *info)
{
return 0;
@@ -506,34 +453,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info)
return 0;
}
-static inline struct drm_display_mode *
-drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
- int width, int height)
-{
- return NULL;
-}
-
-static inline struct drm_display_mode *
-drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
- int width, int height)
-{
- return NULL;
-}
-
-static inline int
-drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- return 0;
-}
-
-static inline int
-drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- return 0;
-}
-
static inline int
drm_fb_helper_fbdev_setup(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
@@ -575,6 +494,27 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
#endif
+/* TODO: There's a todo entry to remove these three */
+static inline int
+drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+ return 0;
+}
+
+static inline int
+drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+
+static inline int
+drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+
/**
* drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
* @a: memory range, users of which are to be removed
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index b3d9d88ab290..306d1efeb5e0 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -260,6 +260,50 @@ drm_format_info_is_yuv_sampling_444(const struct drm_format_info *info)
return info->is_yuv && info->hsub == 1 && info->vsub == 1;
}
+/**
+ * drm_format_info_plane_width - width of the plane given the first plane
+ * @info: pixel format info
+ * @width: width of the first plane
+ * @plane: plane index
+ *
+ * Returns:
+ * The width of @plane, given that the width of the first plane is @width.
+ */
+static inline
+int drm_format_info_plane_width(const struct drm_format_info *info, int width,
+ int plane)
+{
+ if (!info || plane >= info->num_planes)
+ return 0;
+
+ if (plane == 0)
+ return width;
+
+ return width / info->hsub;
+}
+
+/**
+ * drm_format_info_plane_height - height of the plane given the first plane
+ * @info: pixel format info
+ * @height: height of the first plane
+ * @plane: plane index
+ *
+ * Returns:
+ * The height of @plane, given that the height of the first plane is @height.
+ */
+static inline
+int drm_format_info_plane_height(const struct drm_format_info *info, int height,
+ int plane)
+{
+ if (!info || plane >= info->num_planes)
+ return 0;
+
+ if (plane == 0)
+ return height;
+
+ return height / info->vsub;
+}
+
const struct drm_format_info *__drm_format_info(u32 format);
const struct drm_format_info *drm_format_info(u32 format);
const struct drm_format_info *
@@ -268,12 +312,6 @@ drm_get_format_info(struct drm_device *dev,
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
uint32_t bpp, uint32_t depth);
-int drm_format_num_planes(uint32_t format);
-int drm_format_plane_cpp(uint32_t format, int plane);
-int drm_format_horz_chroma_subsampling(uint32_t format);
-int drm_format_vert_chroma_subsampling(uint32_t format);
-int drm_format_plane_width(int width, uint32_t format, int plane);
-int drm_format_plane_height(int height, uint32_t format, int plane);
unsigned int drm_format_info_block_width(const struct drm_format_info *info,
int plane);
unsigned int drm_format_info_block_height(const struct drm_format_info *info,
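
These inline helpers, together with fb->format, replace the removed drm_format_* lookups; the zx_plane conversion earlier in this series shows the mechanical substitution. A sketch of per-plane scanout math in the new style (pitch computation illustrative; it ignores block-based formats and alignment):

	const struct drm_format_info *info = fb->format;
	unsigned int i;

	for (i = 0; i < info->num_planes; i++) {
		unsigned int w = drm_format_info_plane_width(info,
							     fb->width, i);
		unsigned int min_pitch = w * info->cpp[i];

		/* program plane i from fb->offsets[i] with fb->pitches[i],
		 * which must be at least min_pitch */
	}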
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index c23016748e3f..c0e0256e3e98 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -87,6 +87,9 @@ struct drm_framebuffer_funcs {
* for more information as all the semantics and arguments have a one to
* one mapping on this function.
*
+ * Atomic drivers should use drm_atomic_helper_dirtyfb() to implement
+ * this hook.
+ *
* RETURNS:
*
* 0 on success or a negative error code on failure.
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
new file mode 100644
index 000000000000..9581ea0a4f7e
--- /dev/null
+++ b/include/drm/drm_gem_vram_helper.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef DRM_GEM_VRAM_HELPER_H
+#define DRM_GEM_VRAM_HELPER_H
+
+#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_placement.h>
+#include <linux/kernel.h> /* for container_of() */
+
+struct drm_mode_create_dumb;
+struct drm_vram_mm_funcs;
+struct filp;
+struct vm_area_struct;
+
+#define DRM_GEM_VRAM_PL_FLAG_VRAM TTM_PL_FLAG_VRAM
+#define DRM_GEM_VRAM_PL_FLAG_SYSTEM TTM_PL_FLAG_SYSTEM
+
+/*
+ * Buffer-object helpers
+ */
+
+/**
+ * struct drm_gem_vram_object - GEM object backed by VRAM
+ * @gem: GEM object
+ * @bo: TTM buffer object
+ * @kmap: Mapping information for @bo
+ * @placement: TTM placement information. Supported placements are
+ * %TTM_PL_VRAM and %TTM_PL_SYSTEM
+ * @placements: TTM placement information.
+ * @pin_count: Pin counter
+ *
+ * The type struct drm_gem_vram_object represents a GEM object that is
+ * backed by VRAM. It can be used for simple framebuffer devices with
+ * dedicated memory. The buffer object can be evicted to system memory if
+ * video memory becomes scarce.
+ */
+struct drm_gem_vram_object {
+ struct drm_gem_object gem;
+ struct ttm_buffer_object bo;
+ struct ttm_bo_kmap_obj kmap;
+
+ /* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */
+ struct ttm_placement placement;
+ struct ttm_place placements[2];
+
+ int pin_count;
+};
+
+/**
+ * drm_gem_vram_of_bo() - Returns the container of type
+ * &struct drm_gem_vram_object for field bo.
+ * @bo: the VRAM buffer object
+ * Returns: The containing GEM VRAM object
+ */
+static inline struct drm_gem_vram_object *drm_gem_vram_of_bo(
+ struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct drm_gem_vram_object, bo);
+}
+
+/**
+ * drm_gem_vram_of_gem() - Returns the container of type
+ * &struct drm_gem_vram_object for field gem.
+ * @gem: the GEM object
+ * Returns: The containing GEM VRAM object
+ */
+static inline struct drm_gem_vram_object *drm_gem_vram_of_gem(
+ struct drm_gem_object *gem)
+{
+ return container_of(gem, struct drm_gem_vram_object, gem);
+}
+
+struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
+ struct ttm_bo_device *bdev,
+ size_t size,
+ unsigned long pg_align,
+ bool interruptible);
+void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
+u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo);
+s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
+int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag);
+int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
+void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
+ bool *is_iomem);
+void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo);
+
+int drm_gem_vram_fill_create_dumb(struct drm_file *file,
+ struct drm_device *dev,
+ struct ttm_bo_device *bdev,
+ unsigned long pg_align,
+ bool interruptible,
+ struct drm_mode_create_dumb *args);
+
+/*
+ * Helpers for struct ttm_bo_driver
+ */
+
+void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *pl);
+
+int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp);
+
+extern const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs;
+
+/*
+ * Helpers for struct drm_driver
+ */
+
+void drm_gem_vram_driver_gem_free_object_unlocked(struct drm_gem_object *gem);
+int drm_gem_vram_driver_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+
+/**
+ * DRM_GEM_VRAM_DRIVER - default callback functions for &struct drm_driver
+ *
+ * Drivers that use VRAM MM and GEM VRAM can use this macro to initialize
+ * &struct drm_driver with default functions.
+ */
+#define DRM_GEM_VRAM_DRIVER \
+ .gem_free_object_unlocked = \
+ drm_gem_vram_driver_gem_free_object_unlocked, \
+ .dumb_create = drm_gem_vram_driver_dumb_create, \
+ .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset
+
+/*
+ * PRIME helpers for struct drm_driver
+ */
+
+int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *obj);
+void drm_gem_vram_driver_gem_prime_unpin(struct drm_gem_object *obj);
+void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *obj);
+void drm_gem_vram_driver_gem_prime_vunmap(struct drm_gem_object *obj,
+ void *vaddr);
+int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
+#define DRM_GEM_VRAM_DRIVER_PRIME \
+ .gem_prime_export = drm_gem_prime_export, \
+ .gem_prime_import = drm_gem_prime_import, \
+ .gem_prime_pin = drm_gem_vram_driver_gem_prime_pin, \
+ .gem_prime_unpin = drm_gem_vram_driver_gem_prime_unpin, \
+ .gem_prime_vmap = drm_gem_vram_driver_gem_prime_vmap, \
+ .gem_prime_vunmap = drm_gem_vram_driver_gem_prime_vunmap, \
+ .gem_prime_mmap = drm_gem_vram_driver_gem_prime_mmap
+
+#endif
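
A hedged sketch of how a simple VRAM-backed driver might consume these helpers; mydrv names are hypothetical, and the kmap error convention (ERR_PTR) is assumed from the prototypes above:

static struct drm_driver mydrv_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	DRM_GEM_VRAM_DRIVER,
	DRM_GEM_VRAM_DRIVER_PRIME,
	/* fops, name, date, etc. elided */
};

static int mydrv_map_fb(struct drm_gem_vram_object *gbo, void **vaddr)
{
	bool is_iomem;
	int ret;

	ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
	if (ret)
		return ret;

	*vaddr = drm_gem_vram_kmap(gbo, true, &is_iomem);
	if (IS_ERR(*vaddr)) {	/* assuming ERR_PTR-style errors */
		drm_gem_vram_unpin(gbo);
		return PTR_ERR(*vaddr);
	}
	return 0;
}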
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index f243408ecf26..13771a496e2b 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -252,17 +252,44 @@ struct hdcp2_rep_stream_ready {
* host format and back
*/
static inline
-u32 drm_hdcp2_seq_num_to_u32(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN])
+u32 drm_hdcp_be24_to_cpu(const u8 seq_num[HDCP_2_2_SEQ_NUM_LEN])
{
return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16);
}
static inline
-void drm_hdcp2_u32_to_seq_num(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
+void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
{
seq_num[0] = val >> 16;
seq_num[1] = val >> 8;
seq_num[2] = val;
}
+#define DRM_HDCP_SRM_GEN1_MAX_BYTES (5 * 1024)
+#define DRM_HDCP_1_4_SRM_ID 0x8
+#define DRM_HDCP_SRM_ID_MASK (0xF << 4)
+#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3
+#define DRM_HDCP_1_4_DCP_SIG_SIZE 40
+#define DRM_HDCP_2_SRM_ID 0x9
+#define DRM_HDCP_2_INDICATOR 0x1
+#define DRM_HDCP_2_INDICATOR_MASK 0xF
+#define DRM_HDCP_2_VRL_LENGTH_SIZE 3
+#define DRM_HDCP_2_DCP_SIG_SIZE 384
+#define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ 4
+#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC0) >> 6)
+
+struct hdcp_srm_header {
+ u8 srm_id;
+ u8 reserved;
+ __be16 srm_version;
+ u8 srm_gen_no;
+} __packed;
+
+struct drm_device;
+struct drm_connector;
+
+bool drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
+ u8 *ksvs, u32 ksv_count);
+int drm_connector_attach_content_protection_property(
+ struct drm_connector *connector);
#endif
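
The renamed helpers make the wire format explicit: seq_num fields are 24-bit big-endian. A round-trip sketch:

	u8 seq_num[HDCP_2_2_SEQ_NUM_LEN];
	u32 val = 0x123456;

	drm_hdcp_cpu_to_be24(seq_num, val);
	/* seq_num is now { 0x12, 0x34, 0x56 } */
	WARN_ON(drm_hdcp_be24_to_cpu(seq_num) != val);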
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 2182a56ac421..58dc0c04bf99 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -1,11 +1,5 @@
#ifndef __DRM_DRM_LEGACY_H__
#define __DRM_DRM_LEGACY_H__
-
-#include <drm/drm_auth.h>
-#include <drm/drm_hashtab.h>
-
-struct drm_device;
-
/*
* Legacy driver interfaces for the Direct Rendering Manager
*
@@ -39,6 +33,12 @@ struct drm_device;
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_hashtab.h>
+
+struct drm_device;
+struct file;
/*
 * Legacy Support for paleontologic DRM drivers
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 7f60e8eb269a..759d462d028b 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -836,6 +836,19 @@ struct drm_mode_config {
*/
struct drm_property *writeback_out_fence_ptr_property;
+ /**
+ * @hdr_output_metadata_property: Connector property containing HDR
+ * metadata. This will be provided by userspace compositors based
+ * on HDR content.
+ */
+ struct drm_property *hdr_output_metadata_property;
+
+ /**
+ * @content_protection_property: DRM ENUM property for content
+ * protection. See drm_connector_attach_content_protection_property().
+ */
+ struct drm_property *content_protection_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index f7bbd0b0ecd1..6b18c8adfe9d 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -680,6 +680,52 @@ struct drm_encoder_helper_funcs {
struct drm_connector *connector);
/**
+ * @atomic_disable:
+ *
+ * This callback should be used to disable the encoder. With the atomic
+ * drivers it is called before this encoder's CRTC is shut off using the
+ * CRTC's own &drm_crtc_helper_funcs.atomic_disable hook. If that
+ * sequence is too simple, drivers can just add their own driver-private
+ * encoder hooks and call them from the CRTC's callback by looping over
+ * all encoders connected to it using for_each_encoder_on_crtc().
+ *
+ * This callback is a variant of @disable that provides the atomic state
+ * to the driver. If @atomic_disable is implemented, @disable is not
+ * called by the helpers.
+ *
+ * This hook is only used by atomic helpers. Atomic drivers don't need
+ * to implement it if there's no need to disable anything at the encoder
+ * level. To ensure that runtime PM handling (using either DPMS or the
+ * new "ACTIVE" property) works @atomic_disable must be the inverse of
+ * @atomic_enable.
+ */
+ void (*atomic_disable)(struct drm_encoder *encoder,
+ struct drm_atomic_state *state);
+
+ /**
+ * @atomic_enable:
+ *
+ * This callback should be used to enable the encoder. It is called
+ * after this encoder's CRTC has been enabled using the CRTC's own
+ * &drm_crtc_helper_funcs.atomic_enable hook. If that sequence is
+ * too simple, drivers can just add their own driver-private encoder
+ * hooks and call them from the CRTC's callback by looping over all
+ * encoders connected to it using for_each_encoder_on_crtc().
+ *
+ * This callback is a variant of @enable that provides the atomic state
+ * to the driver. If @atomic_enable is implemented, @enable is not
+ * called by the helpers.
+ *
+ * This hook is only used by atomic helpers; it is the opposite of
+ * @atomic_disable. Atomic drivers don't need to implement it if there's
+ * no need to enable anything at the encoder level. To ensure that
+ * runtime PM handling works, @atomic_enable must be the inverse of
+ * @atomic_disable.
+ */
+ void (*atomic_enable)(struct drm_encoder *encoder,
+ struct drm_atomic_state *state);
+
+ /**
* @disable:
*
* This callback should be used to disable the encoder. With the atomic
@@ -695,6 +741,9 @@ struct drm_encoder_helper_funcs {
* handling (using either DPMS or the new "ACTIVE" property) works
* @disable must be the inverse of @enable for atomic drivers.
*
+ * For atomic drivers also consider @atomic_disable and save yourself
+ * from having to read the NOTE below!
+ *
* NOTE:
*
* With legacy CRTC helpers there's a big semantic difference between
@@ -719,11 +768,11 @@ struct drm_encoder_helper_funcs {
* hooks and call them from CRTC's callback by looping over all encoders
* connected to it using for_each_encoder_on_crtc().
*
- * This hook is used only by atomic helpers, for symmetry with @disable.
- * Atomic drivers don't need to implement it if there's no need to
- * enable anything at the encoder level. To ensure that runtime PM handling
- * (using either DPMS or the new "ACTIVE" property) works
- * @enable must be the inverse of @disable for atomic drivers.
+ * This hook is only used by atomic helpers; it is the opposite of
+ * @disable. Atomic drivers don't need to implement it if there's no
+ * need to enable anything at the encoder level. To ensure that
+ * runtime PM handling (using either DPMS or the new "ACTIVE" property)
+ * works, @enable must be the inverse of @disable for atomic drivers.
*/
void (*enable)(struct drm_encoder *encoder);
@@ -979,7 +1028,7 @@ struct drm_connector_helper_funcs {
* deadlock.
*/
int (*atomic_check)(struct drm_connector *connector,
- struct drm_connector_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_commit:
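
Taken together, the new hooks let a driver drop the legacy @enable/@disable pair; a minimal vtable sketch (hypothetical example_* names, hardware programming elided):

#include <drm/drm_modeset_helper_vtables.h>

static void example_encoder_atomic_enable(struct drm_encoder *encoder,
                                          struct drm_atomic_state *state)
{
        /* Hardware bring-up goes here; the helpers call this after the
         * CRTC's own atomic_enable has run.
         */
}

static void example_encoder_atomic_disable(struct drm_encoder *encoder,
                                           struct drm_atomic_state *state)
{
        /* Hardware shutdown goes here; the helpers call this before the
         * CRTC is shut off.
         */
}

static const struct drm_encoder_helper_funcs example_encoder_helper_funcs = {
        .atomic_enable = example_encoder_atomic_enable,
        .atomic_disable = example_encoder_atomic_disable,
};
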
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 6078c700d9ba..cd5903ad33f7 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -69,7 +69,7 @@ struct drm_plane_state {
*
* Optional fence to wait for before scanning out @fb. The core atomic
* code will set this when userspace is using explicit fencing. Do not
- * write this directly for a driver's implicit fence, use
+ * write this field directly for a driver's implicit fence, use
* drm_atomic_set_fence_for_plane() to ensure that an explicit fence is
* preserved.
*
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 3a4247319e63..a5d6f2f3e430 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -32,6 +32,8 @@
#include <linux/device.h>
#include <linux/debugfs.h>
+#include <drm/drm.h>
+
/**
* DOC: print
*
diff --git a/include/drm/drm_self_refresh_helper.h b/include/drm/drm_self_refresh_helper.h
new file mode 100644
index 000000000000..397a583ccca7
--- /dev/null
+++ b/include/drm/drm_self_refresh_helper.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2019 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+#ifndef DRM_SELF_REFRESH_HELPER_H_
+#define DRM_SELF_REFRESH_HELPER_H_
+
+struct drm_atomic_state;
+struct drm_crtc;
+
+void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state);
+
+int drm_self_refresh_helper_init(struct drm_crtc *crtc,
+ unsigned int entry_delay_ms);
+
+void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc);
+#endif
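
A CRTC opts in by calling the init helper once; a sketch assuming a 100 ms idle delay (an arbitrary illustrative value):

#include <drm/drm_self_refresh_helper.h>

static int example_crtc_enable_self_refresh(struct drm_crtc *crtc)
{
        /* Enter self refresh after 100 ms of idle; undone in the CRTC's
         * teardown path with drm_self_refresh_helper_cleanup(crtc).
         */
        return drm_self_refresh_helper_init(crtc, 100);
}
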
diff --git a/include/drm/drm_vram_mm_helper.h b/include/drm/drm_vram_mm_helper.h
new file mode 100644
index 000000000000..a8ffd8599b08
--- /dev/null
+++ b/include/drm/drm_vram_mm_helper.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef DRM_VRAM_MM_HELPER_H
+#define DRM_VRAM_MM_HELPER_H
+
+#include <drm/ttm/ttm_bo_driver.h>
+
+struct drm_device;
+
+/**
+ * struct drm_vram_mm_funcs - Callback functions for &struct drm_vram_mm
+ * @evict_flags: Provides an implementation for
+	&struct ttm_bo_driver.evict_flags
+ * @verify_access: Provides an implementation for
+	&struct ttm_bo_driver.verify_access
+ *
+ * These callback functions integrate VRAM MM with TTM buffer objects. New
+ * functions can be added if necessary.
+ */
+struct drm_vram_mm_funcs {
+ void (*evict_flags)(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+ int (*verify_access)(struct ttm_buffer_object *bo, struct file *filp);
+};
+
+/**
+ * struct drm_vram_mm - An instance of VRAM MM
+ * @vram_base: Base address of the managed video memory
+ * @vram_size: Size of the managed video memory in bytes
+ * @bdev: The TTM BO device.
+ * @funcs: TTM BO functions
+ *
+ * The fields &struct drm_vram_mm.vram_base and
+ * &struct drm_vram_mm.vram_size are managed by VRAM MM, but are
+ * available for public read access. Use the field
+ * &struct drm_vram_mm.bdev to access the TTM BO device.
+ */
+struct drm_vram_mm {
+ uint64_t vram_base;
+ size_t vram_size;
+
+ struct ttm_bo_device bdev;
+
+ const struct drm_vram_mm_funcs *funcs;
+};
+
+/**
+ * drm_vram_mm_of_bdev() - \
	Returns the containing &struct drm_vram_mm for the field @bdev.
+ * @bdev: the TTM BO device
+ *
+ * Returns:
+ * The containing instance of &struct drm_vram_mm
+ */
+static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
+ struct ttm_bo_device *bdev)
+{
+ return container_of(bdev, struct drm_vram_mm, bdev);
+}
+
+int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
+ uint64_t vram_base, size_t vram_size,
+ const struct drm_vram_mm_funcs *funcs);
+void drm_vram_mm_cleanup(struct drm_vram_mm *vmm);
+
+int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
+ struct drm_vram_mm *vmm);
+
+/*
+ * Helpers for integration with struct drm_device
+ */
+
+struct drm_vram_mm *drm_vram_helper_alloc_mm(
+ struct drm_device *dev, uint64_t vram_base, size_t vram_size,
+ const struct drm_vram_mm_funcs *funcs);
+void drm_vram_helper_release_mm(struct drm_device *dev);
+
+/*
+ * Helpers for &struct file_operations
+ */
+
+int drm_vram_mm_file_operations_mmap(
+ struct file *filp, struct vm_area_struct *vma);
+
+/**
+ * define DRM_VRAM_MM_FILE_OPERATIONS - default callback functions for \
+ &struct file_operations
+ *
+ * Drivers that use VRAM MM can use this macro to initialize
+ * &struct file_operations with default functions.
+ */
+#define DRM_VRAM_MM_FILE_OPERATIONS \
+ .llseek = no_llseek, \
+ .read = drm_read, \
+ .poll = drm_poll, \
+ .unlocked_ioctl = drm_ioctl, \
+ .compat_ioctl = drm_compat_ioctl, \
+ .mmap = drm_vram_mm_file_operations_mmap, \
+ .open = drm_open, \
+ .release = drm_release \
+
+#endif
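
Putting the pieces together, a driver instantiates VRAM MM at load time and reuses the file-operations macro; a sketch with placeholder callbacks (the example_* names are hypothetical):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vram_mm_helper.h>

static const struct drm_vram_mm_funcs example_vram_mm_funcs = {
        .evict_flags = NULL,    /* a real driver supplies TTM placements */
        .verify_access = NULL,  /* and an mmap permission check */
};

static int example_load(struct drm_device *dev, u64 vram_base, size_t size)
{
        struct drm_vram_mm *vmm;

        vmm = drm_vram_helper_alloc_mm(dev, vram_base, size,
                                       &example_vram_mm_funcs);
        return PTR_ERR_OR_ZERO(vmm);
}

static const struct file_operations example_fops = {
        .owner = THIS_MODULE,
        DRM_VRAM_MM_FILE_OPERATIONS
};
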
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 0daca4d8dad9..57b4121c750a 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -167,9 +167,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
- * @finish_work: schedules the function @drm_sched_job_finish once the job has
- * finished to remove the job from the
- * @drm_gpu_scheduler.ring_mirror_list.
* @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
@@ -188,7 +185,6 @@ struct drm_sched_job {
struct drm_gpu_scheduler *sched;
struct drm_sched_fence *s_fence;
struct dma_fence_cb finish_cb;
- struct work_struct finish_work;
struct list_head node;
uint64_t id;
atomic_t karma;
@@ -263,6 +259,7 @@ struct drm_sched_backend_ops {
* guilty and it will be considered for scheduling further.
* @num_jobs: the number of jobs in queue in the scheduler
* @ready: marks if the underlying HW is ready to work
+ * @free_guilty: A hint to the timeout handler to free the guilty job.
*
* One scheduler is implemented for each hardware ring.
*/
@@ -283,6 +280,7 @@ struct drm_gpu_scheduler {
int hang_limit;
atomic_t num_jobs;
bool ready;
+ bool free_guilty;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
@@ -296,7 +294,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
-void drm_sched_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
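
With @bad now passed to drm_sched_stop(), a typical timeout handler (wired up as &drm_sched_backend_ops.timedout_job) looks roughly like this sketch; hardware reset and error handling are elided:

#include <drm/gpu_scheduler.h>

static void example_timedout_job(struct drm_sched_job *bad)
{
        struct drm_gpu_scheduler *sched = bad->sched;

        drm_sched_stop(sched, bad);     /* park the scheduler, mark @bad */
        drm_sched_increase_karma(bad);
        /* ... reset the hardware ring here ... */
        drm_sched_resubmit_jobs(sched);
        drm_sched_start(sched, true);   /* true: full recovery */
}
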
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 6477da22af28..6d60ea68c171 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -559,7 +559,6 @@
#define INTEL_ICL_PORT_F_IDS(info) \
INTEL_VGA_DEVICE(0x8A50, info), \
INTEL_VGA_DEVICE(0x8A5C, info), \
- INTEL_VGA_DEVICE(0x8A5D, info), \
INTEL_VGA_DEVICE(0x8A59, info), \
INTEL_VGA_DEVICE(0x8A58, info), \
INTEL_VGA_DEVICE(0x8A52, info), \
@@ -573,7 +572,8 @@
#define INTEL_ICL_11_IDS(info) \
INTEL_ICL_PORT_F_IDS(info), \
- INTEL_VGA_DEVICE(0x8A51, info)
+ INTEL_VGA_DEVICE(0x8A51, info), \
+ INTEL_VGA_DEVICE(0x8A5D, info)
/* EHL */
#define INTEL_EHL_IDS(info) \
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 129dabbc002d..c9b8ba492f24 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -767,11 +767,12 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
- if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bo->bdev->glob->lru_lock);
+ spin_lock(&bo->bdev->glob->lru_lock);
+ if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
+ else
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&bo->bdev->glob->lru_lock);
reservation_object_unlock(bo->resv);
}
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 621615fa7728..7e46cc678e7e 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -70,6 +70,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* @list: thread private list of ttm_validate_buffer structs.
* @intr: should the wait be interruptible
* @dups: [out] optional list of duplicates.
+ * @del_lru: true if BOs should be removed from the LRU.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -98,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups);
+ struct list_head *dups, bool del_lru);
/**
* function ttm_eu_fence_buffer_objects.
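
Call sites gain one argument; a sketch, where passing true for @del_lru appears to match the previous implicit behaviour of taking reserved BOs off the LRU:

#include <drm/ttm/ttm_execbuf_util.h>

static int example_reserve(struct ww_acquire_ctx *ticket,
                           struct list_head *list)
{
        return ttm_eu_reserve_buffers(ticket, list, true /* intr */,
                                      NULL /* dups */, true /* del_lru */);
}
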
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 58725f890b5b..8a327566d7f4 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -39,19 +39,21 @@ struct dma_buf_attachment;
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: [optional] maps a page from the buffer into kernel address
- * space, users may not block until the subsequent unmap call.
- * This callback must not sleep.
- * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
- * This Callback must not sleep.
- * @map: [optional] maps a page from the buffer into kernel address space.
- * @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
* @vunmap: [optional] unmaps a vmap from the buffer
*/
struct dma_buf_ops {
/**
+ * @cache_sgt_mapping:
+ *
+ * If true the framework will cache the first mapping made for each
+ * attachment. This avoids creating mappings for attachments multiple
+ * times.
+ */
+ bool cache_sgt_mapping;
+
+ /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -205,8 +207,6 @@ struct dma_buf_ops {
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
- void *(*map)(struct dma_buf *, unsigned long);
- void (*unmap)(struct dma_buf *, unsigned long, void *);
/**
* @mmap:
@@ -245,6 +245,31 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
+ /**
+ * @map:
+ *
+ * Maps a page from the buffer into kernel address space. The page is
+ * specified by offset into the buffer in PAGE_SIZE units.
+ *
+ * This callback is optional.
+ *
+ * Returns:
+ *
+ * Virtual address pointer where requested page can be accessed. NULL
+ * on error or when this function is unimplemented by the exporter.
+ */
+ void *(*map)(struct dma_buf *, unsigned long);
+
+ /**
+ * @unmap:
+ *
+ * Unmaps a page from the buffer. The page offset and address pointer
+ * should be the same as those passed to and returned by the matching
+ * call to @map.
+ *
+ * This callback is optional.
+ */
+ void (*unmap)(struct dma_buf *, unsigned long, void *);
+
void *(*vmap)(struct dma_buf *);
void (*vunmap)(struct dma_buf *, void *vaddr);
};
@@ -307,6 +332,8 @@ struct dma_buf {
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
* @node: list of dma_buf_attachment.
+ * @sgt: cached mapping.
+ * @dir: direction of cached mapping.
* @priv: exporter specific attachment data.
*
* This structure holds the attachment information between the dma_buf buffer
@@ -322,6 +349,8 @@ struct dma_buf_attachment {
struct dma_buf *dmabuf;
struct device *dev;
struct list_head node;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
void *priv;
};
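
An exporter opts in to the cached mapping by setting the new flag in its ops; a minimal sketch (hypothetical example_* names, real map/unmap logic elided):

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

static struct sg_table *example_map_dma_buf(struct dma_buf_attachment *attach,
                                            enum dma_data_direction dir)
{
        /* A real exporter builds and DMA-maps an sg_table here. */
        return NULL;
}

static void example_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *sgt,
                                  enum dma_data_direction dir)
{
        /* ... undo example_map_dma_buf() ... */
}

static const struct dma_buf_ops example_dmabuf_ops = {
        .cache_sgt_mapping = true, /* first mapping cached in attach->sgt */
        .map_dma_buf = example_map_dma_buf,
        .unmap_dma_buf = example_unmap_dma_buf,
};
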
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 927ad6451105..9918a6c910c5 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -47,6 +47,7 @@ enum hdmi_infoframe_type {
HDMI_INFOFRAME_TYPE_AVI = 0x82,
HDMI_INFOFRAME_TYPE_SPD = 0x83,
HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
+ HDMI_INFOFRAME_TYPE_DRM = 0x87,
};
#define HDMI_IEEE_OUI 0x000c03
@@ -55,6 +56,7 @@ enum hdmi_infoframe_type {
#define HDMI_AVI_INFOFRAME_SIZE 13
#define HDMI_SPD_INFOFRAME_SIZE 25
#define HDMI_AUDIO_INFOFRAME_SIZE 10
+#define HDMI_DRM_INFOFRAME_SIZE 26
#define HDMI_INFOFRAME_SIZE(type) \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
@@ -152,6 +154,17 @@ enum hdmi_content_type {
HDMI_CONTENT_TYPE_GAME,
};
+enum hdmi_metadata_type {
+ HDMI_STATIC_METADATA_TYPE1 = 1,
+};
+
+enum hdmi_eotf {
+ HDMI_EOTF_TRADITIONAL_GAMMA_SDR,
+ HDMI_EOTF_TRADITIONAL_GAMMA_HDR,
+ HDMI_EOTF_SMPTE_ST2084,
+ HDMI_EOTF_BT_2100_HLG,
+};
+
struct hdmi_avi_infoframe {
enum hdmi_infoframe_type type;
unsigned char version;
@@ -175,12 +188,37 @@ struct hdmi_avi_infoframe {
unsigned short right_bar;
};
+/* DRM Infoframe as per CTA 861.G spec */
+struct hdmi_drm_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ enum hdmi_eotf eotf;
+ enum hdmi_metadata_type metadata_type;
+ struct {
+ u16 x, y;
+ } display_primaries[3];
+ struct {
+ u16 x, y;
+ } white_point;
+ u16 max_display_mastering_luminance;
+ u16 min_display_mastering_luminance;
+ u16 max_cll;
+ u16 max_fall;
+};
+
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
void *buffer, size_t size);
int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
+int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame);
+ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame);
enum hdmi_spd_sdi {
HDMI_SPD_SDI_UNKNOWN,
@@ -320,6 +358,33 @@ struct hdmi_vendor_infoframe {
unsigned int s3d_ext_data;
};
+/* HDR Metadata as per 861.G spec */
+struct hdr_static_metadata {
+ __u8 eotf;
+ __u8 metadata_type;
+ __u16 max_cll;
+ __u16 max_fall;
+ __u16 min_cll;
+};
+
+/**
+ * struct hdr_sink_metadata - HDR sink metadata
+ *
+ * Metadata Information read from the sink's EDID
+ */
+struct hdr_sink_metadata {
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u32 metadata_type;
+ /**
+ * @hdmi_type1: HDR Metadata Infoframe.
+ */
+ union {
+ struct hdr_static_metadata hdmi_type1;
+ };
+};
+
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
@@ -344,6 +409,7 @@ union hdmi_vendor_any_infoframe {
* @spd: spd infoframe
* @vendor: union of all vendor infoframes
* @audio: audio infoframe
+ * @drm: Dynamic Range and Mastering infoframe
*
* This is used by the generic pack function. This works since all infoframes
* have the same header which also indicates which type of infoframe should be
@@ -355,6 +421,7 @@ union hdmi_infoframe {
struct hdmi_spd_infoframe spd;
union hdmi_vendor_any_infoframe vendor;
struct hdmi_audio_infoframe audio;
+ struct hdmi_drm_infoframe drm;
};
ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
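
Building and packing the new infoframe mirrors the existing AVI helpers; a sketch with illustrative metadata values:

#include <linux/hdmi.h>

static ssize_t example_pack_drm_infoframe(void *buf, size_t size)
{
        struct hdmi_drm_infoframe frame;
        int ret;

        ret = hdmi_drm_infoframe_init(&frame);
        if (ret)
                return ret;

        frame.eotf = HDMI_EOTF_SMPTE_ST2084;
        frame.metadata_type = HDMI_STATIC_METADATA_TYPE1;
        frame.max_cll = 1000;   /* cd/m2, illustrative */
        frame.max_fall = 400;   /* cd/m2, illustrative */

        return hdmi_drm_infoframe_pack(&frame, buf, size);
}
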
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index ee750765cc94..644a22dbe53b 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -216,8 +216,12 @@ reservation_object_unlock(struct reservation_object *obj)
{
#ifdef CONFIG_DEBUG_MUTEXES
/* Test shared fence slot reservation */
- if (obj->fence)
- obj->fence->shared_max = obj->fence->shared_count;
+ if (rcu_access_pointer(obj->fence)) {
+ struct reservation_object_list *fence =
+ reservation_object_get_list(obj);
+
+ fence->shared_max = fence->shared_count;
+ }
#endif
ww_mutex_unlock(&obj->lock);
}
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 661d73f9a919..8a5b2f8f8eb9 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -50,6 +50,7 @@ typedef unsigned int drm_handle_t;
#else /* One of the BSDs */
+#include <stdint.h>
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t __s8;
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 83cd1636b9be..5ab331e5dc23 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -33,6 +33,15 @@
extern "C" {
#endif
+/**
+ * DOC: overview
+ *
+ * DRM exposes many UAPI and structure definitions to provide a
+ * consistent and standardized interface with userspace.
+ * Userspace can refer to these structure definitions and UAPI formats
+ * to communicate with the driver.
+ */
+
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
@@ -630,6 +639,92 @@ struct drm_color_lut {
__u16 reserved;
};
+/**
+ * struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
+ *
+ * HDR Metadata Infoframe as per the CTA 861.G spec. This is expected
+ * to match the spec exactly.
+ *
+ * Userspace is expected to pass the metadata information as per
+ * the format described in this structure.
+ */
+struct hdr_metadata_infoframe {
+ /**
+ * @eotf: Electro-Optical Transfer Function (EOTF)
+ * used in the stream.
+ */
+ __u8 eotf;
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u8 metadata_type;
+ /**
+ * @display_primaries: Color Primaries of the Data.
+ * These are coded as unsigned 16-bit values in units of
+ * 0.00002, where 0x0000 represents zero and 0xC350
+ * represents 1.0000.
+ * @display_primaries.x: X coordinate of color primary.
+ * @display_primaries.y: Y coordinate of color primary.
+ */
+ struct {
+ __u16 x, y;
+ } display_primaries[3];
+ /**
+ * @white_point: White Point of Colorspace Data.
+ * These are coded as unsigned 16-bit values in units of
+ * 0.00002, where 0x0000 represents zero and 0xC350
+ * represents 1.0000.
+ * @white_point.x: X coordinate of the white point.
+ * @white_point.y: Y coordinate of the white point.
+ */
+ struct {
+ __u16 x, y;
+ } white_point;
+ /**
+ * @max_display_mastering_luminance: Max Mastering Display Luminance.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_display_mastering_luminance;
+ /**
+ * @min_display_mastering_luminance: Min Mastering Display Luminance.
+ * This value is coded as an unsigned 16-bit value in units of
+ * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF
+ * represents 6.5535 cd/m2.
+ */
+ __u16 min_display_mastering_luminance;
+ /**
+ * @max_cll: Max Content Light Level.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_cll;
+ /**
+ * @max_fall: Max Frame Average Light Level.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_fall;
+};
+
+/**
+ * struct hdr_output_metadata - HDR output metadata
+ *
+ * Metadata Information to be passed from userspace
+ */
+struct hdr_output_metadata {
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u32 metadata_type;
+ /**
+ * @hdmi_metadata_type1: HDR Metadata Infoframe.
+ */
+ union {
+ struct hdr_metadata_infoframe hdmi_metadata_type1;
+ };
+};
+
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
@@ -803,6 +898,10 @@ struct drm_format_modifier {
};
/**
+ * struct drm_mode_create_blob - Create a new blob property
+ * @data: Pointer to data to copy.
+ * @length: Length of data to copy.
+ * @blob_id: new property ID.
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
*/
@@ -816,6 +915,8 @@ struct drm_mode_create_blob {
};
/**
+ * struct drm_mode_destroy_blob - Destroy user blob
+ * @blob_id: blob_id to destroy
* Destroy a user-created blob property.
*/
struct drm_mode_destroy_blob {
@@ -823,6 +924,12 @@ struct drm_mode_destroy_blob {
};
/**
+ * struct drm_mode_create_lease - Create lease
+ * @object_ids: Pointer to array of object ids.
+ * @object_count: Number of object ids.
+ * @flags: flags for new FD.
+ * @lessee_id: unique identifier for lessee.
+ * @fd: file descriptor to new drm_master file.
* Lease mode resources, creating another drm_master.
*/
struct drm_mode_create_lease {
@@ -840,6 +947,10 @@ struct drm_mode_create_lease {
};
/**
+ * struct drm_mode_list_lessees - List lessees
+ * @count_lessees: Number of lessees.
+ * @pad: pad.
+ * @lessees_ptr: Pointer to lessees.
 * List lessees from a drm_master
*/
struct drm_mode_list_lessees {
@@ -860,6 +971,10 @@ struct drm_mode_list_lessees {
};
/**
+ * struct drm_mode_get_lease - Get Lease
+ * @count_objects: Number of leased objects.
+ * @pad: pad.
+ * @objects_ptr: Pointer to objects.
* Get leased objects
*/
struct drm_mode_get_lease {
@@ -880,6 +995,8 @@ struct drm_mode_get_lease {
};
/**
+ * struct drm_mode_revoke_lease - Revoke lease
+ * @lessee_id: Unique ID of lessee.
* Revoke lease
*/
struct drm_mode_revoke_lease {
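
From userspace, the HDR struct above is wrapped in a property blob and handed to the connector's HDR output metadata property; a sketch (the metadata values are illustrative):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int example_create_hdr_blob(int fd, uint32_t *blob_id)
{
        struct hdr_output_metadata meta;
        struct drm_mode_create_blob blob;

        memset(&meta, 0, sizeof(meta));
        meta.metadata_type = 0;                  /* Static_Metadata_Descriptor_ID; illustrative */
        meta.hdmi_metadata_type1.eotf = 2;       /* SMPTE ST 2084 (PQ) */
        meta.hdmi_metadata_type1.max_cll = 1000; /* cd/m2 */

        memset(&blob, 0, sizeof(blob));
        blob.data = (uintptr_t)&meta;
        blob.length = sizeof(meta);

        if (ioctl(fd, DRM_IOCTL_MODE_CREATEPROPBLOB, &blob))
                return -1;
        *blob_id = blob.blob_id;
        return 0;
}
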
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 3a73f5316766..328d05e77d9f 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -136,6 +136,8 @@ enum drm_i915_gem_engine_class {
struct i915_engine_class_instance {
__u16 engine_class; /* see enum drm_i915_gem_engine_class */
__u16 engine_instance;
+#define I915_ENGINE_CLASS_INVALID_NONE -1
+#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};
/**
@@ -355,6 +357,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_I915_QUERY 0x39
+#define DRM_I915_GEM_VM_CREATE 0x3a
+#define DRM_I915_GEM_VM_DESTROY 0x3b
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -415,6 +419,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
+#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -598,6 +604,12 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_MMAP_GTT_COHERENT 52
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
+ * execution through use of explicit fence support.
+ * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
+ */
+#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
/* Must be kept compact -- no holes and well documented */
typedef struct drm_i915_getparam {
@@ -1120,7 +1132,16 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_FENCE_ARRAY (1<<19)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
+/*
+ * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
+ * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
+ * the batch.
+ *
+ * Returns -EINVAL if the sync_file fd cannot be found.
+ */
+#define I915_EXEC_FENCE_SUBMIT (1 << 20)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1464,8 +1485,9 @@ struct drm_i915_gem_context_create_ext {
__u32 ctx_id; /* output: id of new context*/
__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
- (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
+ (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
__u64 extensions;
};
@@ -1507,6 +1529,41 @@ struct drm_i915_gem_context_param {
* On creation, all new contexts are marked as recoverable.
*/
#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
+
+ /*
+ * The id of the associated virtual memory address space (ppGTT) of
+ * this context. Can be retrieved and passed to another context
+ * (on the same fd) for both to use the same ppGTT and so share
+ * address layouts, and avoid reloading the page tables on context
+ * switches between themselves.
+ *
+ * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
+ */
+#define I915_CONTEXT_PARAM_VM 0x9
+
+/*
+ * I915_CONTEXT_PARAM_ENGINES:
+ *
+ * Bind this context to operate on this subset of available engines. Henceforth,
+ * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
+ * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
+ * and upwards. Slots 0...N are filled in using the specified (class, instance).
+ * Use
+ * engine_class: I915_ENGINE_CLASS_INVALID,
+ * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
+ * to specify a gap in the array that can be filled in later, e.g. by a
+ * virtual engine used for load balancing.
+ *
+ * Setting the number of engines bound to the context to 0, by passing a
+ * zero-sized argument, will revert to the default settings.
+ *
+ * See struct i915_context_param_engines.
+ *
+ * Extensions:
+ * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
+ * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ */
+#define I915_CONTEXT_PARAM_ENGINES 0xa
/* Must be kept compact -- no holes and well documented */
__u64 value;
@@ -1540,9 +1597,10 @@ struct drm_i915_gem_context_param_sseu {
struct i915_engine_class_instance engine;
/*
- * Unused for now. Must be cleared to zero.
+ * Unknown flags must be cleared to zero.
*/
__u32 flags;
+#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
/*
* Mask of slices to enable for the context. Valid values are a subset
@@ -1570,12 +1628,115 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
+/*
+ * i915_context_engines_load_balance:
+ *
+ * Enable load balancing across this set of engines.
+ *
+ * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
+ * used will proxy the execbuffer request onto one of the set of engines
+ * in such a way as to distribute the load evenly across the set.
+ *
+ * The set of engines must be compatible (e.g. the same HW class) as they
+ * will share the same logical GPU context and ring.
+ *
+ * To intermix rendering with the virtual engine and direct rendering onto
+ * the backing engines (bypassing the load balancing proxy), the context must
+ * be defined to use a single timeline for all engines.
+ */
+struct i915_context_engines_load_balance {
+ struct i915_user_extension base;
+
+ __u16 engine_index;
+ __u16 num_siblings;
+ __u32 flags; /* all undefined flags must be zero */
+
+ __u64 mbz64; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 num_siblings; \
+ __u32 flags; \
+ __u64 mbz64; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/*
+ * i915_context_engines_bond:
+ *
+ * Constructs bonded pairs for execution within a virtual engine.
+ *
+ * All engines are equal, but some are more equal than others. Given
+ * the distribution of resources in the HW, it may be preferable to run
+ * a request on a given subset of engines in parallel to a request on a
+ * specific engine. We enable this selection of engines within a virtual
+ * engine by specifying bonding pairs; for any given master engine we will
+ * only execute on one of the corresponding siblings within the virtual engine.
+ *
+ * Executing a request in parallel on the master engine and a sibling
+ * requires coordination with an I915_EXEC_FENCE_SUBMIT.
+ */
+struct i915_context_engines_bond {
+ struct i915_user_extension base;
+
+ struct i915_engine_class_instance master;
+
+ __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
+ __u16 num_bonds;
+
+ __u64 flags; /* all undefined flags must be zero */
+ __u64 mbz64[4]; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
+ struct i915_user_extension base; \
+ struct i915_engine_class_instance master; \
+ __u16 virtual_index; \
+ __u16 num_bonds; \
+ __u64 flags; \
+ __u64 mbz64[4]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+struct i915_context_param_engines {
+ __u64 extensions; /* linked chain of extension blocks, 0 terminates */
+#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
+#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
+ __u64 extensions; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
struct drm_i915_gem_context_create_ext_setparam {
#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
struct i915_user_extension base;
struct drm_i915_gem_context_param param;
};
+struct drm_i915_gem_context_create_ext_clone {
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
+ struct i915_user_extension base;
+ __u32 clone_id;
+ __u32 flags;
+#define I915_CONTEXT_CLONE_ENGINES (1u << 0)
+#define I915_CONTEXT_CLONE_FLAGS (1u << 1)
+#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
+#define I915_CONTEXT_CLONE_SSEU (1u << 3)
+#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
+#define I915_CONTEXT_CLONE_VM (1u << 5)
+#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
+ __u64 rsvd;
+};
+
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
@@ -1821,6 +1982,7 @@ struct drm_i915_perf_oa_config {
struct drm_i915_query_item {
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
/* Must be kept compact -- no holes and well documented */
/*
@@ -1919,6 +2081,47 @@ struct drm_i915_query_topology_info {
__u8 data[];
};
+/**
+ * struct drm_i915_engine_info
+ *
+ * Describes one engine and its capabilities as known to the driver.
+ */
+struct drm_i915_engine_info {
+ /** Engine class and instance. */
+ struct i915_engine_class_instance engine;
+
+ /** Reserved field. */
+ __u32 rsvd0;
+
+ /** Engine flags. */
+ __u64 flags;
+
+ /** Capabilities of this engine. */
+ __u64 capabilities;
+#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
+#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
+
+ /** Reserved fields. */
+ __u64 rsvd1[4];
+};
+
+/**
+ * struct drm_i915_query_engine_info
+ *
+ * Engine info query enumerates all engines known to the driver by filling in
+ * an array of struct drm_i915_engine_info structures.
+ */
+struct drm_i915_query_engine_info {
+ /** Number of struct drm_i915_engine_info structs following. */
+ __u32 num_engines;
+
+ /** MBZ */
+ __u32 rsvd[3];
+
+ /** Marker for drm_i915_engine_info structures. */
+ struct drm_i915_engine_info engines[];
+};
+
#if defined(__cplusplus)
}
#endif
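
For userspace, binding a context to an explicit engine set combines the define-helper macro with CONTEXT_SETPARAM; a sketch (error handling elided):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_set_engines(int fd, uint32_t ctx_id)
{
        I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
                .extensions = 0,        /* no load-balance/bond extensions */
                .engines = {
                        { I915_ENGINE_CLASS_RENDER, 0 },
                        { I915_ENGINE_CLASS_COPY, 0 },
                },
        };
        struct drm_i915_gem_context_param param = {
                .ctx_id = ctx_id,
                .size = sizeof(engines),
                .param = I915_CONTEXT_PARAM_ENGINES,
                .value = (uintptr_t)&engines,
        };

        /* I915_EXEC_DEFAULT now selects engines[0], i.e. the render engine. */
        return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &param);
}
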
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index ea70669d2138..58fbe48c91e9 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -37,6 +37,7 @@ extern "C" {
#define DRM_V3D_GET_PARAM 0x04
#define DRM_V3D_GET_BO_OFFSET 0x05
#define DRM_V3D_SUBMIT_TFU 0x06
+#define DRM_V3D_SUBMIT_CSD 0x07
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -45,6 +46,7 @@ extern "C" {
#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
+#define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd)
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -190,6 +192,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_V3D_CORE0_IDENT1,
DRM_V3D_PARAM_V3D_CORE0_IDENT2,
DRM_V3D_PARAM_SUPPORTS_TFU,
+ DRM_V3D_PARAM_SUPPORTS_CSD,
};
struct drm_v3d_get_param {
@@ -230,6 +233,31 @@ struct drm_v3d_submit_tfu {
__u32 out_sync;
};
+/* Submits a compute shader for dispatch. This job will block on any
+ * previous compute shaders submitted on this fd, and any other
+ * synchronization must be performed with in_sync/out_sync.
+ */
+struct drm_v3d_submit_csd {
+ __u32 cfg[7];
+ __u32 coef[4];
+
+ /* Pointer to a u32 array of the BOs that are referenced by the job.
+ */
+ __u64 bo_handles;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ /* sync object to block on before running the CSD job. Each
+ * CSD job will execute in the order submitted to its FD.
+ * Synchronization against rendering/TFU jobs or CSD from
+ * other fds requires using sync objects.
+ */
+ __u32 in_sync;
+ /* Sync object to signal when the CSD job is done. */
+ __u32 out_sync;
+};
+
#if defined(__cplusplus)
}
#endif
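
Dispatch from userspace is a single ioctl on the render node; a sketch (cfg[]/coef[] are hardware-specific and left zeroed here):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/v3d_drm.h>

static int example_submit_csd(int fd, uint32_t *bo_handles, uint32_t count,
                              uint32_t in_sync, uint32_t out_sync)
{
        struct drm_v3d_submit_csd submit;

        memset(&submit, 0, sizeof(submit));
        submit.bo_handles = (uintptr_t)bo_handles;
        submit.bo_handle_count = count;
        submit.in_sync = in_sync;       /* waited on before dispatch */
        submit.out_sync = out_sync;     /* signalled on completion */

        return ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CSD, &submit);
}
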
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index dc067ed0b72d..070d1bc7e725 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -35,9 +35,10 @@ struct kfd_ioctl_get_version_args {
};
/* For kfd_ioctl_create_queue_args.queue_type. */
-#define KFD_IOC_QUEUE_TYPE_COMPUTE 0
-#define KFD_IOC_QUEUE_TYPE_SDMA 1
-#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 2
+#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0
+#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
+#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
+#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
@@ -338,6 +339,7 @@ struct kfd_ioctl_acquire_vm_args {
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
+#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
@@ -408,6 +410,21 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
__u32 n_success; /* to/from KFD */
};
+/* Allocate GWS for specific queue
+ *
+ * @gpu_id: device identifier
+ * @queue_id: queue's id that GWS is allocated for
+ * @num_gws: how many GWS to allocate
+ * @first_gws: index of the first GWS allocated.
+ *             Only contiguous GWS allocation is supported.
+ */
+struct kfd_ioctl_alloc_queue_gws_args {
+ __u32 gpu_id; /* to KFD */
+ __u32 queue_id; /* to KFD */
+ __u32 num_gws; /* to KFD */
+ __u32 first_gws; /* from KFD */
+};
+
struct kfd_ioctl_get_dmabuf_info_args {
__u64 size; /* from KFD */
__u64 metadata_ptr; /* to KFD */
@@ -426,6 +443,13 @@ struct kfd_ioctl_import_dmabuf_args {
__u32 dmabuf_fd; /* to KFD */
};
+/* Register offset inside the remapped mmio page
+ */
+enum kfd_mmio_remap {
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
+ KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
+};
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -520,7 +544,10 @@ struct kfd_ioctl_import_dmabuf_args {
#define AMDKFD_IOC_IMPORT_DMABUF \
AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
+#define AMDKFD_IOC_ALLOC_QUEUE_GWS \
+ AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x1E
+#define AMDKFD_COMMAND_END 0x1F
#endif
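
A userspace sketch of the new GWS ioctl; gpu_id and queue_id are assumed to come from earlier topology and queue-creation calls:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int example_alloc_gws(int kfd_fd, uint32_t gpu_id, uint32_t queue_id,
                             uint32_t num_gws, uint32_t *first_gws)
{
        struct kfd_ioctl_alloc_queue_gws_args args = {
                .gpu_id = gpu_id,
                .queue_id = queue_id,
                .num_gws = num_gws,
        };
        int ret = ioctl(kfd_fd, AMDKFD_IOC_ALLOC_QUEUE_GWS, &args);

        if (!ret)
                *first_gws = args.first_gws; /* start of contiguous range */
        return ret;
}
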