Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig  6
-rw-r--r--  drivers/gpu/drm/Makefile  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c  41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c  48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c  72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c  46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c  23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c  324
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c  48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c  27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mca_v3_0.c  44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mca_v3_0.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c  77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c  1967
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h (renamed from drivers/gpu/drm/amd/display/dc/core/dc_link.c)  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c  76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c  175
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c  71
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c  33
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c  75
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig  14
-rw-r--r--  drivers/gpu/drm/amd/display/Makefile  4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/Makefile  4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  226
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c  26
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h  14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c  33
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c  69
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  88
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h  15
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c  149
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c  79
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c  377
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c  38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c  411
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c  21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h  588
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h  173
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dsc.h  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h  577
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h  117
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c  27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c  62
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c  23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c  41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h  36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h  64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h  62
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c  119
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h  13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h  71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c  23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c  47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c  299
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c  299
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c  86
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h  54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h  52
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h  351
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/Makefile  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c  65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h  19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c  21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c  22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c  123
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.h  15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c  78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.h  23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c  274
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_resource.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c  24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h  31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c  68
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h  28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c  176
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h  52
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c  30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h  12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c  26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c  51
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h  30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c  14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h  22
-rw-r--r--  drivers/gpu/drm/amd/display/include/hdcp_msg_types.h (renamed from drivers/gpu/drm/amd/display/include/hdcp_types.h)  0
-rw-r--r--  drivers/gpu/drm/amd/display/include/link_service_types.h  26
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c  7
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h  4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h  54
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h  4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h  54
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h  219
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h  663
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h  456
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h  674
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h  1109
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h  3276
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c  28
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c  79
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h  3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c  30
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h  12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h  141
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h  212
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h  95
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h  4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h  7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c  43
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c  23
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c  11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c  4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c  2069
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h  32
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c  4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c  4
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c  8
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c  9
-rw-r--r--  drivers/gpu/drm/aspeed/aspeed_gfx_drv.c  4
-rw-r--r--  drivers/gpu/drm/ast/ast_dp.c  10
-rw-r--r--  drivers/gpu/drm/ast/ast_dp501.c  40
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c  2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h  84
-rw-r--r--  drivers/gpu/drm/ast/ast_i2c.c  8
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c  24
-rw-r--r--  drivers/gpu/drm/ast/ast_mm.c  4
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c  104
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c  94
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c  4
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig  14
-rw-r--r--  drivers/gpu/drm/bridge/Makefile  1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7533.c  25
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c  6
-rw-r--r--  drivers/gpu/drm/bridge/display-connector.c  15
-rw-r--r--  drivers/gpu/drm/bridge/fsl-ldb.c  6
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c  6
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c  6
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c  6
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c  6
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c  6
-rw-r--r--  drivers/gpu/drm/bridge/lvds-codec.c  6
-rw-r--r--  drivers/gpu/drm/bridge/nwl-dsi.c  5
-rw-r--r--  drivers/gpu/drm/bridge/panel.c  2
-rw-r--r--  drivers/gpu/drm/bridge/samsung-dsim.c  1967
-rw-r--r--  drivers/gpu/drm/bridge/sii9234.c  5
-rw-r--r--  drivers/gpu/drm/bridge/simple-bridge.c  14
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c  6
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c  6
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c  6
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c  6
-rw-r--r--  drivers/gpu/drm/bridge/tc358762.c  1
-rw-r--r--  drivers/gpu/drm/bridge/thc63lvd1024.c  6
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c  6
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c  20
-rw-r--r--  drivers/gpu/drm/drm_connector.c  28
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c  4
-rw-r--r--  drivers/gpu/drm/drm_displayid.c  62
-rw-r--r--  drivers/gpu/drm/drm_drv.c  26
-rw-r--r--  drivers/gpu/drm/drm_dumb_buffers.c  5
-rw-r--r--  drivers/gpu/drm/drm_edid.c  65
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c  63
-rw-r--r--  drivers/gpu/drm/drm_fbdev_dma.c  268
-rw-r--r--  drivers/gpu/drm/drm_fbdev_generic.c  279
-rw-r--r--  drivers/gpu/drm/drm_file.c  2
-rw-r--r--  drivers/gpu/drm/drm_gem.c  25
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c  65
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c  11
-rw-r--r--  drivers/gpu/drm/drm_internal.h  3
-rw-r--r--  drivers/gpu/drm/drm_modes.c  3
-rw-r--r--  drivers/gpu/drm/drm_of.c  51
-rw-r--r--  drivers/gpu/drm/drm_prime.c  4
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c  5
-rw-r--r--  drivers/gpu/drm/drm_suballoc.c  457
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig  3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c  1813
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c  4
-rw-r--r--  drivers/gpu/drm/gma500/Makefile  1
-rw-r--r--  drivers/gpu/drm/gma500/fbdev.c  344
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c  341
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c  5
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h  19
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c  11
-rw-r--r--  drivers/gpu/drm/i915/Makefile  6
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c  53
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c  21
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c  37
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c  4047
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.h  21
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c  316
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c  116
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c  92
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c  161
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c  266
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h  27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c  49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c  41
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c  119
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.h  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c  48
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy_regs.h  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c  46
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c  13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c  16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c  230
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h  9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c  760
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h  7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h  34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c  592
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.h  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c  88
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c  134
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_reg_defs.h  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_rps.c  81
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_rps.h  22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h  27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c  429
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.h  44
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c  128
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c  50
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.h  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c  84
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c  48
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c  29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c  51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c  165
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.c  16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c  41
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.h  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c  7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c  7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c  32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.h  8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c  157
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c  20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.h  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c  30
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c  173
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_gsc.c  831
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_gsc.h  26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c  79
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c  9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.h  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c  338
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds_regs.h  65
-rw-r--r--  drivers/gpu/drm/i915/display/intel_mg_phy_regs.h  4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c  59
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c  40
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.h  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.c  72
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_refclk.c  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c  15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c  508
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h  19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c  62
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c  186
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite_uapi.c  127
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite_uapi.h  15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c  322
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.h  5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c  6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.c  96
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc_regs.h  461
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_wm.c  408
-rw-r--r--  drivers/gpu/drm/i915/display/intel_wm.h  37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_wm_types.h (renamed from drivers/gpu/drm/i915/intel_pm_types.h)  8
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c  9
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c  309
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.h  7
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c  160
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c  18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c  2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c  15
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.h  2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c  4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c  7
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c  19
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c  137
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h  14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h  3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gsc.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c  166
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.c  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c  28
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_print.h  3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h  9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_sysfs.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c  8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset_types.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c  29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps_types.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c  218
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_gt_pm.c  2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_llc.c  1
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c  10
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_tlb.c  388
-rw-r--r--  drivers/gpu/drm/i915/gt/sysfs_engines.c  72
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c  9
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c  28
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c  109
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h  61
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c  78
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c  21
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_print.h  3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c  21
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c  61
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c  140
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h  2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c  44
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c  56
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c  116
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c  42
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c  23
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c  11
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c  16
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c  18
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c  28
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c  58
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  18
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h  1
-rw-r--r--  drivers/gpu/drm/i915/i915_hwmon.c  51
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  142
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  997
-rw-r--r--  drivers/gpu/drm/i915/i915_reg_defs.h  31
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_ttm_buddy_manager.c  7
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c  28
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h  1
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt_mmio_table.c  77
-rw-r--r--  drivers/gpu/drm/i915/intel_pcode.c  35
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  4112
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h  16
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  19
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.c  65
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.h  2
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h  15
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h  3
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_huc.c  2
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_pm.c  4
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_pm.h  6
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.c  8
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.h  5
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_tee.c  46
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_types.h  3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h  1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c  134
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_dram.c  152
-rw-r--r--  drivers/gpu/drm/imx/Kconfig  1
-rw-r--r--  drivers/gpu/drm/imx/Makefile  1
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-kms.c  4
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/Kconfig  2
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-drm-core.c  4
-rw-r--r--  drivers/gpu/drm/imx/lcdc/Kconfig  7
-rw-r--r--  drivers/gpu/drm/imx/lcdc/Makefile  1
-rw-r--r--  drivers/gpu/drm/imx/lcdc/imx-lcdc.c  546
-rw-r--r--  drivers/gpu/drm/kmb/kmb_drv.c  4
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c  12
-rw-r--r--  drivers/gpu/drm/logicvc/logicvc_drm.c  4
-rw-r--r--  drivers/gpu/drm/mcde/mcde_drv.c  6
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c  4
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c  4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h  3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c  22
-rw-r--r--  drivers/gpu/drm/mxsfb/lcdif_drv.c  4
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c  10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c  2
-rw-r--r--  drivers/gpu/drm/omapdrm/Kconfig  2
-rw-r--r--  drivers/gpu/drm/panel/Kconfig  31
-rw-r--r--  drivers/gpu/drm/panel/Makefile  3
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c  126
-rw-r--r--  drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c  209
-rw-r--r--  drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c  522
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt36523.c  777
-rw-r--r--  drivers/gpu/drm/panel/panel-seiko-43wvf1g.c  12
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7701.c  144
-rw-r--r--  drivers/gpu/drm/panel/panel-sony-td4353-jdi.c  329
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c  11
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drv.c  4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c  11
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ib.c  12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h  25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c  316
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c  7
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c  38
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c  4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_group.c  16
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_regs.h  26
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vsp.c  2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds.c  176
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds.h  12
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c  5
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c  42
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c  16
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c  19
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h  6
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.c  81
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.h  5
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_rgb.c  19
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_rgb.h  6
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c  18
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  29
-rw-r--r--  drivers/gpu/drm/sti/Kconfig  2
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c  4
-rw-r--r--  drivers/gpu/drm/stm/Kconfig  2
-rw-r--r--  drivers/gpu/drm/stm/drv.c  4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c  4
-rw-r--r--  drivers/gpu/drm/tests/drm_format_helper_test.c  146
-rw-r--r--  drivers/gpu/drm/tests/drm_kunit_helpers.c  6
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc.c  12
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc.h  8
-rw-r--r--  drivers/gpu/drm/tidss/tidss_drv.c  4
-rw-r--r--  drivers/gpu/drm/tidss/tidss_plane.c  20
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c  4
-rw-r--r--  drivers/gpu/drm/tiny/arcpgu.c  4
-rw-r--r--  drivers/gpu/drm/tiny/bochs.c  1
-rw-r--r--  drivers/gpu/drm/tiny/cirrus.c  499
-rw-r--r--  drivers/gpu/drm/tiny/simpledrm.c  31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  36
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  19
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c  1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c  1
-rw-r--r--  drivers/gpu/drm/tve200/tve200_drv.c  4
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c  26
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c  4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h  2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c  78
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c  46
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.h  1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate.c  4
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.h  11
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c  1
-rw-r--r--  drivers/gpu/drm/virtio/Kconfig  11
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c  6
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c  4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c  39
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c  4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c  407
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.h  203
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c  14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c  53
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c  36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c  65
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  26
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h  245
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  105
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c  93
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  233
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h  43
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c  57
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c  45
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c  22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c  68
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  246
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c  53
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c  67
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_so.c  6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c  323
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c  20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  107
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c  134
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_va.c  6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c  150
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h  10
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_kms.c  4
-rw-r--r--  drivers/gpu/ipu-v3/Kconfig  2
665 files changed, 37076 insertions, 18431 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index dc0f94f02a82..ba3fb04bb691 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -10,13 +10,13 @@ menuconfig DRM
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
select DRM_PANEL_ORIENTATION_QUIRKS
select HDMI
- select FB_CMDLINE
select I2C
select DMA_SHARED_BUFFER
select SYNC_FILE
# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
# device and dmabuf fd. Let's make sure that is available for our userspace.
select KCMP
+ select VIDEO_CMDLINE
select VIDEO_NOMODESET
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
@@ -232,6 +232,10 @@ config DRM_GEM_SHMEM_HELPER
help
Choose this if you need the GEM shmem helper functions
+config DRM_SUBALLOC_HELPER
+ tristate
+ depends on DRM
+
config DRM_SCHED
tristate
depends on DRM
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ab4460fcd63f..a33257d2bc7f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -82,12 +82,16 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
drm_dma_helper-y := drm_gem_dma_helper.o
+drm_dma_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_dma.o
drm_dma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_dma_helper.o
obj-$(CONFIG_DRM_GEM_DMA_HELPER) += drm_dma_helper.o
drm_shmem_helper-y := drm_gem_shmem_helper.o
obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
+drm_suballoc_helper-y := drm_suballoc.o
+obj-$(CONFIG_DRM_SUBALLOC_HELPER) += drm_suballoc_helper.o
+
drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index a82d36ea88e2..12adca8c7819 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -3,6 +3,7 @@
config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
+ depends on !UML
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
@@ -19,6 +20,7 @@ config DRM_AMDGPU
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select DRM_BUDDY
+ select DRM_SUBALLOC_HELPER
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 1d72cbc85348..5f9ac1bcb6bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
- amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o \
+ amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o amdgpu_hdp.o \
amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
@@ -148,6 +148,7 @@ amdgpu-y += \
sdma_v3_0.o \
sdma_v4_0.o \
sdma_v4_4.o \
+ sdma_v4_4_2.o \
sdma_v5_0.o \
sdma_v5_2.o \
sdma_v6_0.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 164141bc8b4a..acb2a2b38514 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -50,7 +50,6 @@
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
@@ -424,29 +423,11 @@ struct amdgpu_clock {
* alignment).
*/
-#define AMDGPU_SA_NUM_FENCE_LISTS 32
-
struct amdgpu_sa_manager {
- wait_queue_head_t wq;
- struct amdgpu_bo *bo;
- struct list_head *hole;
- struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
- struct list_head olist;
- unsigned size;
- uint64_t gpu_addr;
- void *cpu_ptr;
- uint32_t domain;
- uint32_t align;
-};
-
-/* sub-allocation buffer */
-struct amdgpu_sa_bo {
- struct list_head olist;
- struct list_head flist;
- struct amdgpu_sa_manager *manager;
- unsigned soffset;
- unsigned eoffset;
- struct dma_fence *fence;
+ struct drm_suballoc_manager base;
+ struct amdgpu_bo *bo;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
};
int amdgpu_fence_slab_init(void);
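The per-driver allocator state removed above (the wait queue, hole pointer, and AMDGPU_SA_NUM_FENCE_LISTS fence lists) moves into the shared struct drm_suballoc_manager, so amdgpu keeps only the backing BO and its GPU/CPU addresses. A minimal usage sketch of the common helper, assuming the drm_suballoc.h API this series introduces (drm_suballoc_manager_init(), drm_suballoc_new(), drm_suballoc_free(); error handling trimmed):

	struct drm_suballoc_manager mgr;
	struct drm_suballoc *sa;

	/* Manage a 256 KiB backing buffer with 256-byte minimum alignment. */
	drm_suballoc_manager_init(&mgr, 256 * 1024, 256);

	/* May block (interruptibly) until a fenced range is reclaimed. */
	sa = drm_suballoc_new(&mgr, 4096, GFP_KERNEL, true, 0);
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	/* ... use the [drm_suballoc_soffset(sa), drm_suballoc_eoffset(sa)) range ... */

	/* The range is recycled once @fence signals. */
	drm_suballoc_free(sa, fence);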
@@ -1023,7 +1004,6 @@ struct amdgpu_device {
bool in_runpm;
bool has_pr3;
- bool pm_sysfs_en;
bool ucode_sysfs_en;
bool psp_sysfs_en;
@@ -1113,18 +1093,14 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr);
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr);
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u32 reg_data);
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u64 reg_data);
-
+u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 333780491867..01ba3589b60a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -308,6 +308,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
uint64_t va, void *drm_priv,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset);
+int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
+ struct dma_buf **dmabuf);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d6320c836251..c87515210c4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -711,6 +711,21 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
}
}
+static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
+{
+ if (!mem->dmabuf) {
+ struct dma_buf *ret = amdgpu_gem_prime_export(
+ &mem->bo->tbo.base,
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+ DRM_RDWR : 0);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ mem->dmabuf = ret;
+ }
+
+ return 0;
+}
+
static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_bo **bo)
@@ -718,16 +733,9 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
struct drm_gem_object *gobj;
int ret;
- if (!mem->dmabuf) {
- mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
- mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
- DRM_RDWR : 0);
- if (IS_ERR(mem->dmabuf)) {
- ret = PTR_ERR(mem->dmabuf);
- mem->dmabuf = NULL;
- return ret;
- }
- }
+ ret = kfd_mem_export_dmabuf(mem);
+ if (ret)
+ return ret;
gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
if (IS_ERR(gobj))
@@ -1575,7 +1583,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
{
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
- size_t available;
+ ssize_t available;
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
@@ -1584,6 +1592,9 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
- reserved_for_pt;
spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ if (available < 0)
+ available = 0;
+
return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}
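The size_t to ssize_t change above is what makes the new clamp meaningful: the reservations subtracted here can exceed real_vram_size, and an unsigned intermediate wraps instead of going negative. For example:

	/* With size_t, a negative intermediate wraps on 64-bit:
	 *	(size_t)(4096 - 8192) == 18446744073709547520
	 * so "available < 0" could never be true; ssize_t keeps the sign. */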
@@ -2210,30 +2221,27 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
struct amdgpu_bo *bo;
int ret;
- if (dma_buf->ops != &amdgpu_dmabuf_ops)
- /* Can't handle non-graphics buffers */
- return -EINVAL;
-
- obj = dma_buf->priv;
- if (drm_to_adev(obj->dev) != adev)
- /* Can't handle buffers from other devices */
- return -EINVAL;
+ obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
bo = gem_to_amdgpu_bo(obj);
if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT)))
+ AMDGPU_GEM_DOMAIN_GTT))) {
/* Only VRAM and GTT BOs are supported */
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_obj;
+ }
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem)
- return -ENOMEM;
+ if (!*mem) {
+ ret = -ENOMEM;
+ goto err_put_obj;
+ }
ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
- if (ret) {
- kfree(*mem);
- return ret;
- }
+ if (ret)
+ goto err_free_mem;
if (size)
*size = amdgpu_bo_size(bo);
@@ -2250,7 +2258,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
- drm_gem_object_get(&bo->tbo.base);
+ get_dma_buf(dma_buf);
+ (*mem)->dmabuf = dma_buf;
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
@@ -2262,6 +2271,29 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
(*mem)->is_imported = true;
return 0;
+
+err_free_mem:
+ kfree(*mem);
+err_put_obj:
+ drm_gem_object_put(obj);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
+ struct dma_buf **dma_buf)
+{
+ int ret;
+
+ mutex_lock(&mem->lock);
+ ret = kfd_mem_export_dmabuf(mem);
+ if (ret)
+ goto out;
+
+ get_dma_buf(mem->dmabuf);
+ *dma_buf = mem->dmabuf;
+out:
+ mutex_unlock(&mem->lock);
+ return ret;
}
/* Evict a userptr BO by stopping the queues if necessary
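Note the reference discipline the new export path establishes: kfd_mem_export_dmabuf() caches mem->dmabuf without taking an extra reference, while amdgpu_amdkfd_gpuvm_export_dmabuf() grabs one via get_dma_buf() on the caller's behalf. A hypothetical caller that keeps nothing else therefore owns exactly one reference and must drop it:

	struct dma_buf *dmabuf;
	int r;

	r = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
	if (r)
		return r;

	/* ... hand the dma-buf to its consumer ... */

	dma_buf_put(dmabuf);	/* drops the reference taken by the helper */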
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c4a4e2fe6681..6298e3c1de39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -35,6 +35,7 @@
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
+#include <linux/apple-gmux.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
@@ -675,20 +676,20 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
* amdgpu_device_indirect_rreg - read an indirect register
*
* @adev: amdgpu_device pointer
- * @pcie_index: mmio register offset
- * @pcie_data: mmio register offset
* @reg_addr: indirect register address to read from
*
* Returns the value of indirect register @reg_addr
*/
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr)
{
- unsigned long flags;
- u32 r;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ u32 r;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -706,20 +707,20 @@ u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
* amdgpu_device_indirect_rreg64 - read a 64bits indirect register
*
* @adev: amdgpu_device pointer
- * @pcie_index: mmio register offset
- * @pcie_data: mmio register offset
* @reg_addr: indirect register address to read from
*
* Returns the value of indirect register @reg_addr
*/
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr)
{
- unsigned long flags;
- u64 r;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ u64 r;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -749,13 +750,15 @@ u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
*
*/
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u32 reg_data)
{
- unsigned long flags;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
@@ -778,13 +781,15 @@ void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
*
*/
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
- u32 pcie_index, u32 pcie_data,
u32 reg_addr, u64 reg_data)
{
- unsigned long flags;
+ unsigned long flags, pcie_index, pcie_data;
void __iomem *pcie_index_offset;
void __iomem *pcie_data_offset;
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
@@ -803,6 +808,18 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_get_rev_id - query device rev_id
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return device rev_id
+ */
+u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
+{
+ return adev->nbio.funcs->get_rev_id(adev);
+}
+
+/**
* amdgpu_invalid_rreg - dummy reg read function
*
* @adev: amdgpu_device pointer
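The payoff of folding the NBIO lookups into the accessors is visible at every call site; roughly:

	/* Before: each caller fetched the index/data offsets itself. */
	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
	r = amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data, reg_addr);

	/* After: the helper looks them up internally. */
	r = amdgpu_device_indirect_rreg(adev, reg_addr);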
@@ -3773,8 +3790,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
- pci_enable_pcie_error_reporting(adev->pdev);
-
/* Post card if necessary */
if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
@@ -3864,11 +3879,8 @@ fence_driver_init:
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
r = amdgpu_pm_sysfs_init(adev);
- if (r) {
- adev->pm_sysfs_en = false;
- DRM_ERROR("registering pm debugfs failed (%d).\n", r);
- } else
- adev->pm_sysfs_en = true;
+ if (r)
+ DRM_ERROR("registering pm sysfs failed (%d).\n", r);
r = amdgpu_ucode_sysfs_init(adev);
if (r) {
@@ -3930,12 +3942,15 @@ fence_driver_init:
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
- if (amdgpu_device_supports_px(ddev)) {
- px = true;
+ px = amdgpu_device_supports_px(ddev);
+
+ if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+ apple_gmux_detect(NULL, NULL)))
vga_switcheroo_register_client(adev->pdev,
&amdgpu_switcheroo_ops, px);
+
+ if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- }
if (adev->gmc.xgmi.pending_reset)
queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
@@ -4011,7 +4026,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
if (adev->mman.initialized)
drain_workqueue(adev->mman.bdev.wq);
- if (adev->pm_sysfs_en)
+ if (adev->pm.sysfs_initialized)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
@@ -4039,6 +4054,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
int idx;
+ bool px;
amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
@@ -4057,10 +4073,16 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
kfree(adev->bios);
adev->bios = NULL;
- if (amdgpu_device_supports_px(adev_to_drm(adev))) {
+
+ px = amdgpu_device_supports_px(adev_to_drm(adev));
+
+ if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+ apple_gmux_detect(NULL, NULL)))
vga_switcheroo_unregister_client(adev->pdev);
+
+ if (px)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
- }
+
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_unregister(adev->pdev);
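Init and teardown now evaluate the same registration predicate, so the switcheroo client stays balanced on machines where the dGPU sits behind an Apple gmux rather than a PX mux; the shared condition, spelled out:

	bool register_switcheroo = px ||
		(!pci_is_thunderbolt_attached(adev->pdev) &&
		 apple_gmux_detect(NULL, NULL));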
@@ -4145,8 +4167,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
- drm_kms_helper_poll_disable(dev);
-
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
@@ -4243,8 +4263,6 @@ exit:
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
- drm_kms_helper_poll_enable(dev);
-
amdgpu_ras_resume(adev);
if (adev->mode_info.num_crtc) {
@@ -5582,7 +5600,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_baco(dev))
return -ENOTSUPP;
if (ras && adev->ras_enabled &&
@@ -5598,7 +5616,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
+ if (!amdgpu_device_supports_baco(dev))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1a3cb53d2e0d..77a8b05d3868 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -41,6 +41,7 @@
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
+#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
@@ -711,7 +712,7 @@ static void ip_hw_instance_release(struct kobject *kobj)
kfree(ip_hw_instance);
}
-static struct kobj_type ip_hw_instance_ktype = {
+static const struct kobj_type ip_hw_instance_ktype = {
.release = ip_hw_instance_release,
.sysfs_ops = &ip_hw_instance_sysfs_ops,
.default_groups = ip_hw_instance_groups,
@@ -730,7 +731,7 @@ static void ip_hw_id_release(struct kobject *kobj)
kfree(ip_hw_id);
}
-static struct kobj_type ip_hw_id_ktype = {
+static const struct kobj_type ip_hw_id_ktype = {
.release = ip_hw_id_release,
.sysfs_ops = &kobj_sysfs_ops,
};
@@ -793,18 +794,18 @@ static const struct sysfs_ops ip_die_entry_sysfs_ops = {
.show = ip_die_entry_attr_show,
};
-static struct kobj_type ip_die_entry_ktype = {
+static const struct kobj_type ip_die_entry_ktype = {
.release = ip_die_entry_release,
.sysfs_ops = &ip_die_entry_sysfs_ops,
.default_groups = ip_die_entry_groups,
};
-static struct kobj_type die_kobj_ktype = {
+static const struct kobj_type die_kobj_ktype = {
.release = die_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
};
-static struct kobj_type ip_discovery_ktype = {
+static const struct kobj_type ip_discovery_ktype = {
.release = ip_disc_release,
.sysfs_ops = &kobj_sysfs_ops,
};
@@ -1591,6 +1592,7 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 1):
case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
break;
case IP_VERSION(5, 0, 0):
@@ -1649,6 +1651,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
@@ -1842,6 +1845,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 4, 0):
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
break;
+ case IP_VERSION(4, 4, 2):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
+ break;
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 1):
case IP_VERSION(5, 0, 2):
@@ -2304,6 +2310,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 1):
case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
adev->hdp.funcs = &hdp_v4_0_funcs;
break;
case IP_VERSION(5, 0, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 503f89a766c3..d60fe7eb5579 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1618,6 +1618,8 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
struct drm_connector_list_iter iter;
int r;
+ drm_kms_helper_poll_disable(dev);
+
/* turn off display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);
@@ -1694,6 +1696,8 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
drm_modeset_unlock_all(dev);
+ drm_kms_helper_poll_enable(dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 99a7855ab1bc..c57252f004e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -60,12 +60,13 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
+ struct amdgpu_mem_stats stats;
ktime_t usage[AMDGPU_HW_IP_NUM];
uint32_t bus, dev, fn, domain;
unsigned int hw_ip;
int ret;
+ memset(&stats, 0, sizeof(stats));
bus = adev->pdev->bus->number;
domain = pci_domain_nr(adev->pdev->bus);
dev = PCI_SLOT(adev->pdev->devfn);
@@ -75,7 +76,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
if (ret)
return;
- amdgpu_vm_get_memory(vm, &vram_mem, &gtt_mem, &cpu_mem);
+ amdgpu_vm_get_memory(vm, &stats);
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
@@ -90,9 +91,22 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
seq_printf(m, "drm-driver:\t%s\n", file->minor->dev->driver->name);
seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
seq_printf(m, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
- seq_printf(m, "drm-memory-vram:\t%llu KiB\n", vram_mem/1024UL);
- seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", gtt_mem/1024UL);
- seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", cpu_mem/1024UL);
+ seq_printf(m, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
+ seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
+ seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
+ seq_printf(m, "amd-memory-visible-vram:\t%llu KiB\n",
+ stats.visible_vram/1024UL);
+ seq_printf(m, "amd-evicted-vram:\t%llu KiB\n",
+ stats.evicted_vram/1024UL);
+ seq_printf(m, "amd-evicted-visible-vram:\t%llu KiB\n",
+ stats.evicted_visible_vram/1024UL);
+ seq_printf(m, "amd-requested-vram:\t%llu KiB\n",
+ stats.requested_vram/1024UL);
+ seq_printf(m, "amd-requested-visible-vram:\t%llu KiB\n",
+ stats.requested_visible_vram/1024UL);
+ seq_printf(m, "amd-requested-gtt:\t%llu KiB\n",
+ stats.requested_gtt/1024UL);
+
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
continue;
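
A minimal sketch, assuming a caller that already holds a valid vm pointer, of how the new amdgpu_mem_stats interface is consumed (this mirrors the fdinfo path above; all counters accumulate in bytes):

	struct amdgpu_mem_stats stats;

	memset(&stats, 0, sizeof(stats));
	if (!amdgpu_bo_reserve(vm->root.bo, true)) {
		amdgpu_vm_get_memory(vm, &stats);
		amdgpu_bo_unreserve(vm->root.bo);
	}
	/* e.g. stats.vram / 1024 is the KiB value printed as drm-memory-vram */
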
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d8e683688daa..863cb668e000 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -969,7 +969,7 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
* Therefore, we need to protect this ->comm access using RCU.
*/
rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_PID);
+ task = pid_task(file->pid, PIDTYPE_TGID);
seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
task ? task->comm : "<unknown>");
rcu_read_unlock();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 35ed46b9249c..c50d59855011 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -725,7 +725,7 @@ int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
/* If not define special ras_late_init function, use gfx default ras_late_init */
if (!ras->ras_block.ras_late_init)
- ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
/* If not defined special ras_cb function, use default ras_cb */
if (!ras->ras_block.ras_cb)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 12a6826caef4..655fc8bf936d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -447,13 +447,42 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
} while (fault->timestamp < tmp);
}
-int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev)
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev)
{
- if (!adev->gmc.xgmi.connected_to_cpu) {
- adev->gmc.xgmi.ras = &xgmi_ras;
- amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
- adev->gmc.xgmi.ras_if = &adev->gmc.xgmi.ras->ras_block.ras_comm;
- }
+ int r;
+
+ /* umc ras block */
+ r = amdgpu_umc_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* mmhub ras block */
+ r = amdgpu_mmhub_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* hdp ras block */
+ r = amdgpu_hdp_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* mca (mp0/mp1/mpio) ras blocks */
+ r = amdgpu_mca_mp0_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_mca_mp1_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_mca_mpio_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* xgmi ras block */
+ r = amdgpu_xgmi_ras_sw_init(adev);
+ if (r)
+ return r;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 0305b660cd17..232523e3e270 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -104,6 +104,8 @@ struct amdgpu_vmhub {
uint32_t vm_cntx_cntl_vm_fault;
uint32_t vm_l2_bank_select_reserved_cid2;
+ uint32_t vm_contexts_disable;
+
const struct amdgpu_vmhub_funcs *vmhub_funcs;
};
@@ -351,7 +353,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
uint16_t pasid, uint64_t timestamp);
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid);
-int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev);
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
new file mode 100644
index 000000000000..b6cf801939aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_hdp_ras *ras;
+
+ if (!adev->hdp.ras)
+ return 0;
+
+ ras = adev->hdp.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register hdp ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "hdp");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__HDP;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->hdp.ras_if = &ras->ras_block.ras_comm;
+
+ /* hdp ras follows amdgpu_ras_block_late_init_default for late init */
+ return 0;
+}
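
The same sw_init shape recurs for the mmhub, nbio, umc, mca, xgmi, jpeg and vcn blocks below. A condensed sketch of the shared pattern, with myip as a stand-in for any of those blocks (myip and AMDGPU_RAS_BLOCK__MYIP are hypothetical placeholders, not real amdgpu symbols):

	int amdgpu_myip_ras_sw_init(struct amdgpu_device *adev)
	{
		struct amdgpu_myip_ras *ras = adev->myip.ras;	/* hypothetical block */
		int err;

		if (!ras)
			return 0;	/* the IP has no RAS support on this ASIC */

		/* register first so error handling can find the block */
		err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
		if (err)
			return err;

		strcpy(ras->ras_block.ras_comm.name, "myip");
		ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MYIP;
		ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->myip.ras_if = &ras->ras_block.ras_comm;

		return 0;
	}
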
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index ac5c61d3de2b..7b8a6152dc8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -43,5 +43,5 @@ struct amdgpu_hdp {
struct amdgpu_hdp_ras *ras;
};
-int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index bcccc348dbe2..df7eb0b7c4b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -69,7 +69,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (size) {
r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
- &ib->sa_bo, size, 256);
+ &ib->sa_bo, size);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
return r;
@@ -309,8 +309,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
- AMDGPU_IB_POOL_SIZE,
- AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_IB_POOL_SIZE, 256,
AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 6f81ed4fb0d9..479d9bcc99ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -236,19 +236,28 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
-void jpeg_set_ras_funcs(struct amdgpu_device *adev)
+int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
{
+ int err;
+ struct amdgpu_jpeg_ras *ras;
+
if (!adev->jpeg.ras)
- return;
+ return 0;
- amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);
+ ras = adev->jpeg.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register jpeg ras block!\n");
+ return err;
+ }
- strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
- adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
- adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
- adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;
+ strcpy(ras->ras_block.ras_comm.name, "jpeg");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->jpeg.ras_if = &ras->ras_block.ras_comm;
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->jpeg.ras->ras_block.ras_late_init)
- adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index e8ca3e32ad52..0ca76f0f23e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -72,6 +72,6 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-void jpeg_set_ras_funcs(struct amdgpu_device *adev);
+int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 51c2a82e2fa4..8d9ff9e151de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -70,3 +70,75 @@ void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
amdgpu_mca_reset_error_count(adev, mc_status_addr);
}
+
+int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mp0.ras)
+ return 0;
+
+ ras = adev->mca.mp0.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mp1.ras)
+ return 0;
+
+ ras = adev->mca.mp1.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mpio.ras)
+ return 0;
+
+ ras = adev->mca.mpio.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index 7ce16d16e34b..997a073e2409 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -30,12 +30,7 @@ struct amdgpu_mca_ras {
struct amdgpu_mca_ras_block *ras;
};
-struct amdgpu_mca_funcs {
- void (*init)(struct amdgpu_device *adev);
-};
-
struct amdgpu_mca {
- const struct amdgpu_mca_funcs *funcs;
struct amdgpu_mca_ras mp0;
struct amdgpu_mca_ras mp1;
struct amdgpu_mca_ras mpio;
@@ -55,5 +50,7 @@ void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
void *ras_error_status);
-
+int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
new file mode 100644
index 000000000000..0f6b1021fef3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mmhub_ras *ras;
+
+ if (!adev->mmhub.ras)
+ return 0;
+
+ ras = adev->mmhub.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mmhub ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mmhub");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mmhub.ras_if = &ras->ras_block.ras_comm;
+
+ /* mmhub ras follows amdgpu_ras_block_late_init_default for late init */
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 93430d3823c9..d21bb6dae56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -48,5 +48,7 @@ struct amdgpu_mmhub {
struct amdgpu_mmhub_ras *ras;
};
+int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 37d779b8e4a6..a3bc00577a7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -22,6 +22,29 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
+int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_nbio_ras *ras;
+
+ if (!adev->nbio.ras)
+ return 0;
+
+ ras = adev->nbio.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register pcie_bif ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "pcie_bif");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__PCIE_BIF;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->nbio.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index a240336bbc6b..c686ff4bcc39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -106,5 +106,6 @@ struct amdgpu_nbio {
struct amdgpu_nbio_ras *ras;
};
+int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6c7d672412b2..2bd1a54ee866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -600,7 +600,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.resource->mem_type == TTM_PL_VRAM &&
- bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -1265,24 +1265,41 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem)
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
+ struct amdgpu_mem_stats *stats)
{
unsigned int domain;
+ uint64_t size = amdgpu_bo_size(bo);
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
- *vram_mem += amdgpu_bo_size(bo);
+ stats->vram += size;
+ if (amdgpu_bo_in_cpu_visible_vram(bo))
+ stats->visible_vram += size;
break;
case AMDGPU_GEM_DOMAIN_GTT:
- *gtt_mem += amdgpu_bo_size(bo);
+ stats->gtt += size;
break;
case AMDGPU_GEM_DOMAIN_CPU:
default:
- *cpu_mem += amdgpu_bo_size(bo);
+ stats->cpu += size;
break;
}
+
+ if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
+ stats->requested_vram += size;
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ stats->requested_visible_vram += size;
+
+ if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
+ stats->evicted_vram += size;
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ stats->evicted_visible_vram += size;
+ }
+ } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
+ stats->requested_gtt += size;
+ }
}
/**
@@ -1346,7 +1363,6 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
- unsigned long offset;
int r;
/* Remember that this BO was accessed by the CPU */
@@ -1355,8 +1371,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- offset = bo->resource->start << PAGE_SHIFT;
- if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
+ if (amdgpu_bo_in_cpu_visible_vram(abo))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1378,10 +1393,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
- offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
- (offset + bo->base.size) > adev->gmc.visible_vram_size)
+ !amdgpu_bo_in_cpu_visible_vram(abo))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 93207badf83f..35b8106816a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -126,6 +126,27 @@ struct amdgpu_bo_vm {
struct amdgpu_vm_bo_base entries[];
};
+struct amdgpu_mem_stats {
+ /* current VRAM usage, includes visible VRAM */
+ uint64_t vram;
+ /* current visible VRAM usage */
+ uint64_t visible_vram;
+ /* current GTT usage */
+ uint64_t gtt;
+ /* current system memory usage */
+ uint64_t cpu;
+ /* sum of evicted buffers, includes visible VRAM */
+ uint64_t evicted_vram;
+ /* sum of evicted buffers that asked for CPU-visible VRAM */
+ uint64_t evicted_visible_vram;
+ /* how much VRAM userspace asked for, includes visible VRAM */
+ uint64_t requested_vram;
+ /* how much visible VRAM userspace asked for */
+ uint64_t requested_visible_vram;
+ /* how much GTT userspace asked for */
+ uint64_t requested_gtt;
+};
+
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
@@ -325,8 +346,8 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem);
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
+ struct amdgpu_mem_stats *stats);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
@@ -336,15 +357,22 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
/*
* sub allocation
*/
+static inline struct amdgpu_sa_manager *
+to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
+{
+ return container_of(manager, struct amdgpu_sa_manager, base);
+}
-static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
+static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->gpu_addr + sa_bo->soffset;
+ return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
+ drm_suballoc_soffset(sa_bo);
}
-static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
+static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+ return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
+ drm_suballoc_soffset(sa_bo);
}
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
@@ -355,11 +383,11 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo **sa_bo,
- unsigned size, unsigned align);
+ struct drm_suballoc **sa_bo,
+ unsigned int size);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
- struct amdgpu_sa_bo **sa_bo,
- struct dma_fence *fence);
+ struct drm_suballoc **sa_bo,
+ struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3f5d13035aff..02f948adae72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -191,6 +191,7 @@ static int psp_early_init(void *handle)
psp_v12_0_set_psp_funcs(psp);
break;
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
psp_v13_0_set_psp_funcs(psp);
break;
case IP_VERSION(13, 0, 1):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 63dfcc98152d..11df6ee052b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2554,21 +2554,24 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
/* initialize nbio ras function ahead of any other
* ras functions so hardware fatal error interrupt
* can be enabled as early as possible */
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- if (!adev->gmc.xgmi.connected_to_cpu) {
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(7, 4, 0):
+ case IP_VERSION(7, 4, 1):
+ case IP_VERSION(7, 4, 4):
+ if (!adev->gmc.xgmi.connected_to_cpu)
adev->nbio.ras = &nbio_v7_4_ras;
- amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
- adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm;
- }
break;
default:
/* nbio ras is not available */
break;
}
+ /* nbio ras block needs to be enabled ahead of other ras blocks
+ * to handle fatal errors */
+ r = amdgpu_nbio_ras_sw_init(adev);
+ if (r)
+ return r;
+
if (adev->nbio.ras &&
adev->nbio.ras->init_ras_controller_interrupt) {
r = adev->nbio.ras->init_ras_controller_interrupt(adev);
@@ -3073,9 +3076,6 @@ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
if (!adev || !ras_block_obj)
return -EINVAL;
- if (!amdgpu_ras_asic_supported(adev))
- return 0;
-
ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
if (!ras_node)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 3989e755a5b4..018f36b10de8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -27,6 +27,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
+#include <drm/drm_suballoc.h>
struct amdgpu_device;
struct amdgpu_ring;
@@ -92,7 +93,7 @@ enum amdgpu_ib_pool_type {
};
struct amdgpu_ib {
- struct amdgpu_sa_bo *sa_bo;
+ struct drm_suballoc *sa_bo;
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 524d10b21041..c6b4337eb20c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -44,327 +44,63 @@
#include "amdgpu.h"
-static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
-static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
-
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain)
+ unsigned int size, u32 suballoc_align, u32 domain)
{
- int i, r;
-
- init_waitqueue_head(&sa_manager->wq);
- sa_manager->bo = NULL;
- sa_manager->size = size;
- sa_manager->domain = domain;
- sa_manager->align = align;
- sa_manager->hole = &sa_manager->olist;
- INIT_LIST_HEAD(&sa_manager->olist);
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- INIT_LIST_HEAD(&sa_manager->flist[i]);
+ int r;
- r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
- &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
+ r = amdgpu_bo_create_kernel(adev, size, AMDGPU_GPU_PAGE_SIZE, domain,
+ &sa_manager->bo, &sa_manager->gpu_addr,
+ &sa_manager->cpu_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
- memset(sa_manager->cpu_ptr, 0, sa_manager->size);
+ memset(sa_manager->cpu_ptr, 0, size);
+ drm_suballoc_manager_init(&sa_manager->base, size, suballoc_align);
return r;
}
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager)
{
- struct amdgpu_sa_bo *sa_bo, *tmp;
-
if (sa_manager->bo == NULL) {
dev_err(adev->dev, "no bo for sa manager\n");
return;
}
- if (!list_empty(&sa_manager->olist)) {
- sa_manager->hole = &sa_manager->olist,
- amdgpu_sa_bo_try_free(sa_manager);
- if (!list_empty(&sa_manager->olist)) {
- dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
- }
- }
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
- amdgpu_sa_bo_remove_locked(sa_bo);
- }
+ drm_suballoc_manager_fini(&sa_manager->base);
amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
- sa_manager->size = 0;
}
-static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
-{
- struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
- if (sa_manager->hole == &sa_bo->olist) {
- sa_manager->hole = sa_bo->olist.prev;
- }
- list_del_init(&sa_bo->olist);
- list_del_init(&sa_bo->flist);
- dma_fence_put(sa_bo->fence);
- kfree(sa_bo);
-}
-
-static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
+ struct drm_suballoc **sa_bo,
+ unsigned int size)
{
- struct amdgpu_sa_bo *sa_bo, *tmp;
+ struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
+ GFP_KERNEL, true, 0);
- if (sa_manager->hole->next == &sa_manager->olist)
- return;
+ if (IS_ERR(sa)) {
+ *sa_bo = NULL;
- sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
- list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
- if (sa_bo->fence == NULL ||
- !dma_fence_is_signaled(sa_bo->fence)) {
- return;
- }
- amdgpu_sa_bo_remove_locked(sa_bo);
+ return PTR_ERR(sa);
}
-}
-static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole != &sa_manager->olist) {
- return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
- }
+ *sa_bo = sa;
return 0;
}
-static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole->next != &sa_manager->olist) {
- return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
- }
- return sa_manager->size;
-}
-
-static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo *sa_bo,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- soffset += wasted;
-
- sa_bo->manager = sa_manager;
- sa_bo->soffset = soffset;
- sa_bo->eoffset = soffset + size;
- list_add(&sa_bo->olist, sa_manager->hole);
- INIT_LIST_HEAD(&sa_bo->flist);
- sa_manager->hole = &sa_bo->olist;
- return true;
- }
- return false;
-}
-
-/**
- * amdgpu_sa_event - Check if we can stop waiting
- *
- * @sa_manager: pointer to the sa_manager
- * @size: number of bytes we want to allocate
- * @align: alignment we need to match
- *
- * Check if either there is a fence we can wait for or
- * enough free memory to satisfy the allocation directly
- */
-static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
- int i;
-
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- if (!list_empty(&sa_manager->flist[i]))
- return true;
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- return true;
- }
-
- return false;
-}
-
-static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct dma_fence **fences,
- unsigned *tries)
-{
- struct amdgpu_sa_bo *best_bo = NULL;
- unsigned i, soffset, best, tmp;
-
- /* if hole points to the end of the buffer */
- if (sa_manager->hole->next == &sa_manager->olist) {
- /* try again with its beginning */
- sa_manager->hole = &sa_manager->olist;
- return true;
- }
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- /* to handle wrap around we add sa_manager->size */
- best = sa_manager->size * 2;
- /* go over all fence list and try to find the closest sa_bo
- * of the current last
- */
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
- struct amdgpu_sa_bo *sa_bo;
-
- fences[i] = NULL;
-
- if (list_empty(&sa_manager->flist[i]))
- continue;
-
- sa_bo = list_first_entry(&sa_manager->flist[i],
- struct amdgpu_sa_bo, flist);
-
- if (!dma_fence_is_signaled(sa_bo->fence)) {
- fences[i] = sa_bo->fence;
- continue;
- }
-
- /* limit the number of tries each ring gets */
- if (tries[i] > 2) {
- continue;
- }
-
- tmp = sa_bo->soffset;
- if (tmp < soffset) {
- /* wrap around, pretend it's after */
- tmp += sa_manager->size;
- }
- tmp -= soffset;
- if (tmp < best) {
- /* this sa bo is the closest one */
- best = tmp;
- best_bo = sa_bo;
- }
- }
-
- if (best_bo) {
- uint32_t idx = best_bo->fence->context;
-
- idx %= AMDGPU_SA_NUM_FENCE_LISTS;
- ++tries[idx];
- sa_manager->hole = best_bo->olist.prev;
-
- /* we knew that this one is signaled,
- so it's save to remote it */
- amdgpu_sa_bo_remove_locked(best_bo);
- return true;
- }
- return false;
-}
-
-int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo **sa_bo,
- unsigned size, unsigned align)
-{
- struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
- unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
- unsigned count;
- int i, r;
- signed long t;
-
- if (WARN_ON_ONCE(align > sa_manager->align))
- return -EINVAL;
-
- if (WARN_ON_ONCE(size > sa_manager->size))
- return -EINVAL;
-
- *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
- if (!(*sa_bo))
- return -ENOMEM;
- (*sa_bo)->manager = sa_manager;
- (*sa_bo)->fence = NULL;
- INIT_LIST_HEAD(&(*sa_bo)->olist);
- INIT_LIST_HEAD(&(*sa_bo)->flist);
-
- spin_lock(&sa_manager->wq.lock);
- do {
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- tries[i] = 0;
-
- do {
- amdgpu_sa_bo_try_free(sa_manager);
-
- if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
- size, align)) {
- spin_unlock(&sa_manager->wq.lock);
- return 0;
- }
-
- /* see if we can skip over some allocations */
- } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
-
- for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- if (fences[i])
- fences[count++] = dma_fence_get(fences[i]);
-
- if (count) {
- spin_unlock(&sa_manager->wq.lock);
- t = dma_fence_wait_any_timeout(fences, count, false,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
- for (i = 0; i < count; ++i)
- dma_fence_put(fences[i]);
-
- r = (t > 0) ? 0 : t;
- spin_lock(&sa_manager->wq.lock);
- } else {
- /* if we have nothing to wait for block */
- r = wait_event_interruptible_locked(
- sa_manager->wq,
- amdgpu_sa_event(sa_manager, size, align)
- );
- }
-
- } while (!r);
-
- spin_unlock(&sa_manager->wq.lock);
- kfree(*sa_bo);
- *sa_bo = NULL;
- return r;
-}
-
-void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
+void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo,
struct dma_fence *fence)
{
- struct amdgpu_sa_manager *sa_manager;
-
if (sa_bo == NULL || *sa_bo == NULL) {
return;
}
- sa_manager = (*sa_bo)->manager;
- spin_lock(&sa_manager->wq.lock);
- if (fence && !dma_fence_is_signaled(fence)) {
- uint32_t idx;
-
- (*sa_bo)->fence = dma_fence_get(fence);
- idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
- list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
- } else {
- amdgpu_sa_bo_remove_locked(*sa_bo);
- }
- wake_up_all_locked(&sa_manager->wq);
- spin_unlock(&sa_manager->wq.lock);
+ drm_suballoc_free(*sa_bo, fence);
*sa_bo = NULL;
}
@@ -373,26 +109,8 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m)
{
- struct amdgpu_sa_bo *i;
-
- spin_lock(&sa_manager->wq.lock);
- list_for_each_entry(i, &sa_manager->olist, olist) {
- uint64_t soffset = i->soffset + sa_manager->gpu_addr;
- uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
- if (&i->olist == sa_manager->hole) {
- seq_printf(m, ">");
- } else {
- seq_printf(m, " ");
- }
- seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
- soffset, eoffset, eoffset - soffset);
+ struct drm_printer p = drm_seq_file_printer(m);
- if (i->fence)
- seq_printf(m, " protected by 0x%016llx on context %llu",
- i->fence->seqno, i->fence->context);
-
- seq_printf(m, "\n");
- }
- spin_unlock(&sa_manager->wq.lock);
+ drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif
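
With suballocation now delegated to drm_suballoc, a caller's life cycle reduces to the sketch below (assuming an initialized sa_manager, a valid adev and a fence protecting the GPU work; per this patch the alignment is fixed at manager init rather than per allocation):

	struct drm_suballoc *sa_bo;
	uint64_t gpu_addr;
	void *cpu_ptr;
	int r;

	r = amdgpu_sa_bo_new(&sa_manager, &sa_bo, 256);
	if (r)
		return r;

	gpu_addr = amdgpu_sa_bo_gpu_addr(sa_bo);	/* address for the GPU */
	cpu_ptr = amdgpu_sa_bo_cpu_addr(sa_bo);		/* mapping for the CPU */
	/* ... fill cpu_ptr, submit work that reads gpu_addr ... */

	amdgpu_sa_bo_free(adev, &sa_bo, fence);	/* reclaimed once fence signals */
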
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c5ef7f7bdc15..2cd081cbf706 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -466,11 +466,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
- /* Can't move a pinned BO */
abo = ttm_to_amdgpu_bo(bo);
- if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
- return -EINVAL;
-
adev = amdgpu_ttm_adev(bo->bdev);
if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 1b8574bc4463..da68ceaa024c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -208,6 +208,36 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
}
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_umc_ras *ras;
+
+ if (!adev->umc.ras)
+ return 0;
+
+ ras = adev->umc.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register umc ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "umc");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if = &ras->ras_block.ras_comm;
+
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
+
+ return 0;
+}
+
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 36e19336f3b3..d7f1229ff11f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -87,6 +87,7 @@ struct amdgpu_umc {
unsigned long active_mask;
};
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 632a6ded5735..6887109abb13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1118,14 +1118,11 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *f = NULL;
+ uint32_t offset, data[4];
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- uint32_t data[4];
uint64_t addr;
- long r;
- int i;
- unsigned offset_idx = 0;
- unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
+ int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
@@ -1134,16 +1131,15 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
return r;
- if (adev->asic_type >= CHIP_VEGA10) {
- offset_idx = 1 + ring->me;
- offset[1] = adev->reg_offset[UVD_HWIP][0][1];
- offset[2] = adev->reg_offset[UVD_HWIP][1][1];
- }
+ if (adev->asic_type >= CHIP_VEGA10)
+ offset = adev->reg_offset[UVD_HWIP][ring->me][1];
+ else
+ offset = UVD_BASE_SI;
- data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
- data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
- data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
- data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
+ data[0] = PACKET0(offset + UVD_GPCOM_VCPU_DATA0, 0);
+ data[1] = PACKET0(offset + UVD_GPCOM_VCPU_DATA1, 0);
+ data[2] = PACKET0(offset + UVD_GPCOM_VCPU_CMD, 0);
+ data[3] = PACKET0(offset + UVD_NO_OP, 0);
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
@@ -1160,14 +1156,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout(bo->tbo.base.resv,
- DMA_RESV_USAGE_KERNEL, false,
- msecs_to_jiffies(10));
- if (r == 0)
- r = -ETIMEDOUT;
- if (r < 0)
- goto err_free;
-
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 25217b05c0ea..e63fcc58e8e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -26,6 +26,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>
@@ -114,6 +115,24 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
+ /*
+ * Some Steam Deck BIOS versions are incompatible with the
+ * indirect SRAM mode, leading to amdgpu failing to probe
+ * properly (and even potentially crashing the kernel).
+ * Hence, check for these versions here - notice this is
+ * restricted to Vangogh (the Deck's APU).
+ */
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
+ const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+ if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
+ !strncmp("F7A0114", bios_ver, 7))) {
+ adev->vcn.indirect_sram = false;
+ dev_info(adev->dev,
+ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+ }
+ }
+
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
@@ -1162,19 +1181,28 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
-void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
+int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
+ int err;
+ struct amdgpu_vcn_ras *ras;
+
if (!adev->vcn.ras)
- return;
+ return 0;
+
+ ras = adev->vcn.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register vcn ras block!\n");
+ return err;
+ }
- amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
+ strcpy(ras->ras_block.ras_comm.name, "vcn");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->vcn.ras_if = &ras->ras_block.ras_comm;
- strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
- adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
- adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
- adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->vcn.ras->ras_block.ras_late_init)
- adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d3e2af902907..c730949ece7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -400,6 +400,6 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev);
+int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b9e9480448af..4f7bab52282a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -124,6 +124,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
/* Indirect Reg Access enabled */
AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+ /* AV1 support mode */
+ AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -322,6 +324,8 @@ static inline bool is_virtual_machine(void)
((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
+#define amdgpu_sriov_is_av1_support(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b9441ab457ea..286e326bb4bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -867,6 +867,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pages_addr[idx - 1] + PAGE_SIZE))
break;
}
+ if (!contiguous)
+ count--;
num_entries = count *
AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
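
A standalone toy illustrating the run-length scan this hunk fixes (an assumption-laden sketch, not the kernel code): the loop measures how long the "contiguous or not" property of the first page pair persists. When the run being measured is non-contiguous, the entry that terminates the loop is one that is contiguous with its predecessor, so it belongs to the next run and must be excluded - hence the count-- above.

	#include <stdbool.h>
	#include <stdint.h>

	#define TOY_PAGE_SIZE 4096ULL

	/* number of entries from pfn sharing the first pair's contiguity */
	static uint64_t run_length(const uint64_t *pages_addr, uint64_t pfn,
				   uint64_t max)
	{
		bool contiguous = pages_addr[pfn + 1] ==
				  pages_addr[pfn] + TOY_PAGE_SIZE;
		uint64_t count;

		for (count = 2; count < max; ++count) {
			uint64_t idx = pfn + count;

			if (contiguous != (pages_addr[idx] ==
					   pages_addr[idx - 1] + TOY_PAGE_SIZE))
				break;
		}
		if (!contiguous)
			count--;	/* breaking entry starts the next run */
		return count;
	}
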
@@ -918,8 +920,8 @@ error_unlock:
return r;
}
-void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem)
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
+ struct amdgpu_mem_stats *stats)
{
struct amdgpu_bo_va *bo_va, *tmp;
@@ -927,41 +929,36 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
if (!bo_va->base.bo)
continue;
- amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
- gtt_mem, cpu_mem);
+ amdgpu_bo_get_memory(bo_va->base.bo, stats);
}
spin_unlock(&vm->status_lock);
}
+
/**
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 856a64bc7a89..6f085f0b4ef3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -40,6 +40,7 @@ struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
+struct amdgpu_mem_stats;
/*
* GPUVM handling
@@ -457,8 +458,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
-void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
- uint64_t *gtt_mem, uint64_t *cpu_mem);
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
+ struct amdgpu_mem_stats *stats);
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 4340d08f7607..3fe24348d199 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -308,7 +308,7 @@ static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
.show = amdgpu_xgmi_show_attrs,
};
-struct kobj_type amdgpu_xgmi_hive_type = {
+static const struct kobj_type amdgpu_xgmi_hive_type = {
.release = amdgpu_xgmi_hive_release,
.sysfs_ops = &amdgpu_xgmi_hive_ops,
.default_groups = amdgpu_xgmi_hive_groups,
@@ -1048,12 +1048,30 @@ struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = {
struct amdgpu_xgmi_ras xgmi_ras = {
.ras_block = {
- .ras_comm = {
- .name = "xgmi_wafl",
- .block = AMDGPU_RAS_BLOCK__XGMI_WAFL,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- },
.hw_ops = &xgmi_ras_hw_ops,
.ras_late_init = amdgpu_xgmi_ras_late_init,
},
};
+
+int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_xgmi_ras *ras;
+
+ if (!adev->gmc.xgmi.ras)
+ return 0;
+
+ ras = adev->gmc.xgmi.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register xgmi_wafl_pcs ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl_pcs");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 30dcc1681b4e..86fbf56938f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -73,5 +73,6 @@ static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
adev->gmc.xgmi.hive_id &&
adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
}
+int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 6c97148ca0ed..24d42d24e6a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -93,7 +93,8 @@ union amd_sriov_msg_feature_flags {
uint32_t mm_bw_management : 1;
uint32_t pp_one_vf_mode : 1;
uint32_t reg_indirect_acc : 1;
- uint32_t reserved : 26;
+ uint32_t av1_support : 1;
+ uint32_t reserved : 25;
} flags;
uint32_t all;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 6983acc456b2..516409989235 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -7266,7 +7266,6 @@ static int gfx_v10_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -7285,17 +7284,9 @@ static int gfx_v10_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
- /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
- if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
- tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
- tmp &= 0xffffff00;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
- } else {
- tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
- tmp &= 0xffffff00;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
- }
-
+ /* Skip clearing the KIQ position here: doing so causes a GFX
+ * hang when another Windows guest is rendering.
+ */
return 0;
}
gfx_v10_0_cp_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index ab2556ca984e..d99821692ba3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -699,25 +699,8 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
default:
break;
}
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
-
static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MMHUB_HWIP][0]) {
@@ -754,7 +737,6 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
static int gmc_v10_0_early_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v10_0_set_mmhub_funcs(adev);
@@ -770,10 +752,6 @@ static int gmc_v10_0_early_init(void *handle)
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
- r = amdgpu_gmc_ras_early_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -1024,6 +1002,10 @@ static int gmc_v10_0_sw_init(void *handle)
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index af7b3ba1ca00..fad199ed15f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -581,23 +581,6 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not define special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
@@ -846,6 +829,10 @@ static int gmc_v11_0_sw_init(void *handle)
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -875,6 +862,12 @@ static int gmc_v11_0_sw_fini(void *handle)
static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev)) {
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+
+ WREG32(hub->vm_contexts_disable, 0);
+ return;
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index b06170c00dfc..2a8dc9b52c2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1318,23 +1318,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
@@ -1368,15 +1351,6 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
/* mmhub ras is not available */
break;
}
-
- if (adev->mmhub.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block);
-
- strcpy(adev->mmhub.ras->ras_block.ras_comm.name, "mmhub");
- adev->mmhub.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
- adev->mmhub.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->mmhub.ras_if = &adev->mmhub.ras->ras_block.ras_comm;
- }
}
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
@@ -1387,26 +1361,34 @@ static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
adev->hdp.ras = &hdp_v4_0_ras;
- amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block);
- adev->hdp.ras_if = &adev->hdp.ras->ras_block.ras_comm;
}
-static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
{
+ struct amdgpu_mca *mca = &adev->mca;
+
/* is UMC the right IP to check for MCA? Maybe DF? */
switch (adev->ip_versions[UMC_HWIP][0]) {
case IP_VERSION(6, 7, 0):
- if (!adev->gmc.xgmi.connected_to_cpu)
- adev->mca.funcs = &mca_v3_0_funcs;
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ mca->mp0.ras = &mca_v3_0_mp0_ras;
+ mca->mp1.ras = &mca_v3_0_mp1_ras;
+ mca->mpio.ras = &mca_v3_0_mpio_ras;
+ }
break;
default:
break;
}
}
+static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
+{
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ adev->gmc.xgmi.ras = &xgmi_ras;
+}
+
static int gmc_v9_0_early_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
@@ -1427,7 +1409,8 @@ static int gmc_v9_0_early_init(void *handle)
gmc_v9_0_set_mmhub_ras_funcs(adev);
gmc_v9_0_set_gfxhub_funcs(adev);
gmc_v9_0_set_hdp_ras_funcs(adev);
- gmc_v9_0_set_mca_funcs(adev);
+ gmc_v9_0_set_mca_ras_funcs(adev);
+ gmc_v9_0_set_xgmi_ras_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
@@ -1436,10 +1419,6 @@ static int gmc_v9_0_early_init(void *handle)
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
- r = amdgpu_gmc_ras_early_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -1644,8 +1623,6 @@ static int gmc_v9_0_sw_init(void *handle)
adev->gfxhub.funcs->init(adev);
adev->mmhub.funcs->init(adev);
- if (adev->mca.funcs)
- adev->mca.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
@@ -1798,6 +1775,10 @@ static int gmc_v9_0_sw_init(void *handle)
gmc_v9_0_save_registers(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index adf89680f53e..71d1a2e3bac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -49,7 +49,8 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))
+ if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0) ||
+ adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 2))
return;
if (!ring || !ring->funcs->emit_wreg)
@@ -160,11 +161,6 @@ struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = {
struct amdgpu_hdp_ras hdp_v4_0_ras = {
.ras_block = {
- .ras_comm = {
- .name = "hdp",
- .block = AMDGPU_RAS_BLOCK__HDP,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- },
.hw_ops = &hdp_v4_0_ras_hw_ops,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index f2b743a93915..6b1887808782 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -138,6 +138,10 @@ static int jpeg_v2_5_sw_init(void *handle)
adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
}
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -806,6 +810,4 @@ static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- jpeg_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 3beb731b2ce5..3129094baccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -113,6 +113,10 @@ static int jpeg_v4_0_sw_init(void *handle)
adev->jpeg.internal.jpeg_pitch = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -685,6 +689,4 @@ static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- jpeg_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
index d4bd7d1d2649..6dae4a2e2767 100644
--- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
@@ -51,19 +51,13 @@ static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object *block_obj,
return -EINVAL;
}
-const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = {
+static const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = {
.query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
.query_ras_error_address = NULL,
};
struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = {
.ras_block = {
- .ras_comm = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .name = "mp0",
- },
.hw_ops = &mca_v3_0_mp0_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
},
@@ -77,19 +71,13 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
ras_error_status);
}
-const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = {
+static const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = {
.query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
.query_ras_error_address = NULL,
};
struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = {
.ras_block = {
- .ras_comm = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .name = "mp1",
- },
.hw_ops = &mca_v3_0_mp1_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
},
@@ -103,40 +91,14 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
ras_error_status);
}
-const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = {
+static const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = {
.query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
.query_ras_error_address = NULL,
};
struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = {
.ras_block = {
- .ras_comm = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .name = "mpio",
- },
.hw_ops = &mca_v3_0_mpio_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
},
};
-
-
-static void mca_v3_0_init(struct amdgpu_device *adev)
-{
- struct amdgpu_mca *mca = &adev->mca;
-
- mca->mp0.ras = &mca_v3_0_mp0_ras;
- mca->mp1.ras = &mca_v3_0_mp1_ras;
- mca->mpio.ras = &mca_v3_0_mpio_ras;
- amdgpu_ras_register_ras_block(adev, &mca->mp0.ras->ras_block);
- amdgpu_ras_register_ras_block(adev, &mca->mp1.ras->ras_block);
- amdgpu_ras_register_ras_block(adev, &mca->mpio.ras->ras_block);
- mca->mp0.ras_if = &mca->mp0.ras->ras_block.ras_comm;
- mca->mp1.ras_if = &mca->mp1.ras->ras_block.ras_comm;
- mca->mpio.ras_if = &mca->mpio.ras->ras_block.ras_comm;
-}
-
-const struct amdgpu_mca_funcs mca_v3_0_funcs = {
- .init = mca_v3_0_init,
-};
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
index b899b86194c2..d3eaef0d7f2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
@@ -21,6 +21,8 @@
#ifndef __MCA_V3_0_H__
#define __MCA_V3_0_H__
-extern const struct amdgpu_mca_funcs mca_v3_0_funcs;
+extern struct amdgpu_mca_ras_block mca_v3_0_mp0_ras;
+extern struct amdgpu_mca_ras_block mca_v3_0_mp1_ras;
+extern struct amdgpu_mca_ras_block mca_v3_0_mpio_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
index 164948c50ac3..17a792616979 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
@@ -517,6 +517,9 @@ static void mmhub_v3_0_init(struct amdgpu_device *adev)
hub->vm_l2_bank_select_reserved_cid2 =
SOC15_REG_OFFSET(MMHUB, 0, regMMVM_L2_BANK_SELECT_RESERVED_CID2);
+ hub->vm_contexts_disable =
+ SOC15_REG_OFFSET(MMHUB, 0, regMMVM_CONTEXTS_DISABLE);
+
hub->vmhub_funcs = &mmhub_v3_0_vmhub_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 855d390c41de..15f3c6745ea9 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -280,47 +280,6 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
}
}
-/*
- * Indirect registers accessor
- */
-static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg(adev, address, data, reg);
-}
-
-static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg(adev, address, data, reg, v);
-}
-
-static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg64(adev, address, data, reg);
-}
-
-static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
-}
-
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
@@ -561,21 +520,6 @@ static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
return 0;
}
-static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void nv_program_aspm(struct amdgpu_device *adev)
{
if (!amdgpu_device_should_use_aspm(adev))
@@ -608,11 +552,6 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
adev->virt.ops = &xgpu_nv_virt_ops;
}
-static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
-{
- return adev->nbio.funcs->get_rev_id(adev);
-}
-
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
return true;
@@ -738,10 +677,10 @@ static int nv_common_early_init(void *handle)
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
- adev->pcie_rreg = &nv_pcie_rreg;
- adev->pcie_wreg = &nv_pcie_wreg;
- adev->pcie_rreg64 = &nv_pcie_rreg64;
- adev->pcie_wreg64 = &nv_pcie_wreg64;
+ adev->pcie_rreg = &amdgpu_device_indirect_rreg;
+ adev->pcie_wreg = &amdgpu_device_indirect_wreg;
+ adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
+ adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
@@ -754,7 +693,7 @@ static int nv_common_early_init(void *handle)
adev->asic_funcs = &nv_asic_funcs;
- adev->rev_id = nv_get_rev_id(adev);
+ adev->rev_id = amdgpu_device_get_rev_id(adev);
adev->external_rev_id = 0xff;
/* TODO: split the GC and PG flags based on the relevant IP version for which
* they are relevant.
@@ -1055,8 +994,8 @@ static int nv_common_late_init(void *handle)
amdgpu_virt_update_sriov_video_codec(adev,
sriov_sc_video_codecs_encode_array,
ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
- sriov_sc_video_codecs_decode_array_vcn1,
- ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
+ sriov_sc_video_codecs_decode_array_vcn0,
+ ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0));
}
}
@@ -1088,8 +1027,6 @@ static int nv_common_hw_init(void *handle)
if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);
- /* enable pcie gen2/3 link */
- nv_pcie_gen3_enable(adev);
/* enable aspm */
nv_program_aspm(adev);
/* setup nbio registers */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index d62fcc77af95..caee76ab7110 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -48,6 +48,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -100,6 +101,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
return err;
break;
case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
err = psp_init_sos_microcode(psp, ucode_prefix);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
new file mode 100644
index 000000000000..1b04700a4d55
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -0,0 +1,1967 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ucode.h"
+#include "amdgpu_trace.h"
+
+#include "sdma/sdma_4_4_2_offset.h"
+#include "sdma/sdma_4_4_2_sh_mask.h"
+
+#include "soc15_common.h"
+#include "soc15.h"
+#include "vega10_sdma_pkt_open.h"
+
+#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
+#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+
+#include "amdgpu_ras.h"
+
+MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
+
+#define WREG32_SDMA(instance, offset, value) \
+ WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value)
+#define RREG32_SDMA(instance, offset) \
+ RREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)))
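+
+/*
+ * Illustrative usage: RREG32_SDMA(1, regSDMA_GFX_RB_CNTL) reads the ring
+ * buffer control register of SDMA instance 1, via the per-instance base
+ * returned by sdma_v4_4_2_get_reg_offset() below.
+ */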
+
+static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
+
+static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
+ u32 instance, u32 offset)
+{
+ return (adev->reg_offset[SDMA0_HWIP][instance][0] + offset);
+}
+
+static int sdma_v4_4_2_seq_to_irq_id(int seq_num)
+{
+ switch (seq_num) {
+ case 0:
+ return SOC15_IH_CLIENTID_SDMA0;
+ case 1:
+ return SOC15_IH_CLIENTID_SDMA1;
+ case 2:
+ return SOC15_IH_CLIENTID_SDMA2;
+ case 3:
+ return SOC15_IH_CLIENTID_SDMA3;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id)
+{
+ switch (client_id) {
+ case SOC15_IH_CLIENTID_SDMA0:
+ return 0;
+ case SOC15_IH_CLIENTID_SDMA1:
+ return 1;
+ case SOC15_IH_CLIENTID_SDMA2:
+ return 2;
+ case SOC15_IH_CLIENTID_SDMA3:
+ return 3;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void sdma_v4_4_2_init_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 4, 2):
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * sdma_v4_4_2_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
+{
+ int ret, i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) {
+ ret = amdgpu_sdma_init_microcode(adev, 0, true);
+ break;
+ } else {
+ ret = amdgpu_sdma_init_microcode(adev, i, false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * sdma_v4_4_2_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware.
+ */
+static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ u64 *rptr;
+
+ /* XXX check if swapping is necessary on BE */
+ rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
+
+ DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
+ return ((*rptr) >> 2);
+}
+
+/**
+ * sdma_v4_4_2_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware.
+ */
+static uint64_t sdma_v4_4_2_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u64 wptr;
+
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
+ } else {
+ wptr = RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI);
+ wptr = wptr << 32;
+ wptr |= RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR);
+ DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
+ ring->me, wptr);
+ }
+
+ return wptr >> 2;
+}
+
+/**
+ * sdma_v4_4_2_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware.
+ */
+static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ DRM_DEBUG("Setting write pointer\n");
+ if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
+ WRITE_ONCE(*wb, (ring->wptr << 2));
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ } else {
+ DRM_DEBUG("Not using doorbell -- "
+ "regSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR,
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI,
+ upper_32_bits(ring->wptr << 2));
+ }
+}
+
+/**
+ * sdma_v4_4_2_page_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware.
+ */
+static uint64_t sdma_v4_4_2_page_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u64 wptr;
+
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ } else {
+ wptr = RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI);
+ wptr = wptr << 32;
+ wptr |= RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR);
+ }
+
+ return wptr >> 2;
+}
+
+/**
+ * sdma_v4_4_2_page_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware.
+ */
+static void sdma_v4_4_2_page_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+ /* XXX check if swapping is necessary on BE */
+ WRITE_ONCE(*wb, (ring->wptr << 2));
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ } else {
+ uint64_t wptr = ring->wptr << 2;
+
+ WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR,
+ lower_32_bits(wptr));
+ WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI,
+ upper_32_bits(wptr));
+ }
+}
+
+static void sdma_v4_4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ amdgpu_ring_write(ring, ring->funcs->nop |
+ SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+ else
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
+/**
+ * sdma_v4_4_2_ring_emit_ib - Schedule an IB on the DMA engine
+ *
+ * @ring: amdgpu ring pointer
+ * @job: job to retrieve vmid from
+ * @ib: IB object to schedule
+ * @flags: unused
+ *
+ * Schedule an IB in the DMA ring.
+ */
+static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
+	/* IB packet must end on an 8 DW boundary */
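+	/* e.g. wptr == 5: (2 - 5) & 7 == 5 NOPs move wptr to 10 (2 mod 8),
+	 * so the 6-dword INDIRECT packet below ends on the next boundary
+	 */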
+ sdma_v4_4_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
+ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
+ /* base must be 32 byte aligned */
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
+
+}
+
+static void sdma_v4_4_2_wait_reg_mem(struct amdgpu_ring *ring,
+ int mem_space, int hdp,
+ uint32_t addr0, uint32_t addr1,
+ uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
+ SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ if (mem_space) {
+ /* memory */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ } else {
+ /* registers */
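+		/* register offsets are dword-indexed; convert to byte addresses */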
+ amdgpu_ring_write(ring, addr0 << 2);
+ amdgpu_ring_write(ring, addr1 << 2);
+ }
+ amdgpu_ring_write(ring, ref); /* reference */
+ amdgpu_ring_write(ring, mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
+}
+
+/**
+ * sdma_v4_4_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 ref_and_mask = 0;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ ref_and_mask, ref_and_mask, 10);
+}
+
+/**
+ * sdma_v4_4_2_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed.
+ */
+static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+ /* write the fence */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+ /* zero in first two bits */
+ BUG_ON(addr & 0x3);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+
+ /* optionally write high bits as well */
+ if (write64bit) {
+ addr += 4;
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
+ /* zero in first two bits */
+ BUG_ON(addr & 0x3);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+ }
+
+ /* generate an interrupt */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
+}
+
+
+/**
+ * sdma_v4_4_2_gfx_stop - stop the gfx async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the gfx async dma ring buffers.
+ */
+static void sdma_v4_4_2_gfx_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
+ u32 rb_cntl, ib_cntl;
+ int i, unset = 0;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma[i] = &adev->sdma.instance[i].ring;
+
+ if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ unset = 1;
+ }
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+ }
+}
+
+/**
+ * sdma_v4_4_2_rlc_stop - stop the compute async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the compute async dma queues.
+ */
+static void sdma_v4_4_2_rlc_stop(struct amdgpu_device *adev)
+{
+ /* XXX todo */
+}
+
+/**
+ * sdma_v4_4_2_page_stop - stop the page async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the page async dma ring buffers.
+ */
+static void sdma_v4_4_2_page_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
+ u32 rb_cntl, ib_cntl;
+ int i;
+ bool unset = false;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma[i] = &adev->sdma.instance[i].page;
+
+ if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
+ (!unset)) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ unset = true;
+ }
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
+ RB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL,
+ IB_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
+ }
+}
+
+/**
+ * sdma_v4_4_2_ctx_switch_enable - stop the async dma engines context switch
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs context switch.
+ *
+ * Halt or unhalt the async dma engines context switch.
+ */
+static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 f32_cntl, phase_quantum = 0;
+ int i;
+
+ if (amdgpu_sdma_phase_quantum) {
+ unsigned value = amdgpu_sdma_phase_quantum;
+ unsigned unit = 0;
+
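+		/* halve value and coarsen unit until value fits the VALUE field */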
+ while (value > (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA_PHASE0_QUANTUM__VALUE__SHIFT)) {
+ value = (value + 1) >> 1;
+ unit++;
+ }
+ if (unit > (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA_PHASE0_QUANTUM__UNIT__SHIFT)) {
+ value = (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA_PHASE0_QUANTUM__VALUE__SHIFT);
+ unit = (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA_PHASE0_QUANTUM__UNIT__SHIFT);
+ WARN_ONCE(1,
+ "clamping sdma_phase_quantum to %uK clock cycles\n",
+ value << unit);
+ }
+ phase_quantum =
+ value << SDMA_PHASE0_QUANTUM__VALUE__SHIFT |
+ unit << SDMA_PHASE0_QUANTUM__UNIT__SHIFT;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32_SDMA(i, regSDMA_CNTL);
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL,
+ AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ if (enable && amdgpu_sdma_phase_quantum) {
+ WREG32_SDMA(i, regSDMA_PHASE0_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, regSDMA_PHASE1_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, regSDMA_PHASE2_QUANTUM, phase_quantum);
+ }
+ WREG32_SDMA(i, regSDMA_CNTL, f32_cntl);
+
+ /* Extend page fault timeout to avoid interrupt storm */
+ WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080);
+ }
+
+}
+
+/**
+ * sdma_v4_4_2_enable - stop the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines.
+ */
+static void sdma_v4_4_2_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 f32_cntl;
+ int i;
+
+ if (!enable) {
+ sdma_v4_4_2_gfx_stop(adev);
+ sdma_v4_4_2_rlc_stop(adev);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_4_2_page_stop(adev);
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL);
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 0 : 1);
+ WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl);
+ }
+}
+
+/*
+ * sdma_v4_4_2_rb_cntl - get parameters for rb_cntl
+ */
+static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
+{
+ /* Set ring buffer size in dwords */
+ uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
+
+ barrier(); /* work around https://bugs.llvm.org/show_bug.cgi?id=42576 */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+ return rb_cntl;
+}
+
+/**
+ * sdma_v4_4_2_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the gfx DMA ring buffers and enable them.
+ */
+static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
+{
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+ u32 wb_offset;
+ u32 doorbell;
+ u32 doorbell_offset;
+ u64 wptr_gpu_addr;
+
+ wb_offset = (ring->rptr_offs * 4);
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
+ rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+
+	/* minor_ptr_update must be set before programming wptr to a smaller value */
+ WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
+
+ doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
+
+ doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA_GFX_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
+ WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
+
+ sdma_v4_4_2_ring_set_wptr(ring);
+
+	/* clear minor_ptr_update after wptr is programmed */
+ WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA_GFX_RB_WPTR_POLL_CNTL,
+				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+
+ ring->sched.ready = true;
+}
+
+/**
+ * sdma_v4_4_2_page_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the page DMA ring buffers and enable them.
+ */
+static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
+{
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+ u32 wb_offset;
+ u32 doorbell;
+ u32 doorbell_offset;
+ u64 wptr_gpu_addr;
+
+ wb_offset = (ring->rptr_offs * 4);
+
+ rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
+ rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+
+	/* minor_ptr_update must be set before programming wptr to a smaller value */
+ WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);
+
+ doorbell = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET);
+
+ doorbell = REG_SET_FIELD(doorbell, SDMA_PAGE_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA_PAGE_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ WREG32_SDMA(i, regSDMA_PAGE_DOORBELL, doorbell);
+ WREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET, doorbell_offset);
+
+ /* paging queue doorbell range is setup at sdma_v4_4_2_gfx_resume */
+ sdma_v4_4_2_page_ring_set_wptr(ring);
+
+	/* clear minor_ptr_update after wptr is programmed */
+ WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA_PAGE_RB_WPTR_POLL_CNTL,
+				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
+
+ ring->sched.ready = true;
+}
+
+static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
+{
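+	/* currently a no-op: no SDMA power-gating programming is needed yet */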
+
+}
+
+/**
+ * sdma_v4_4_2_rlc_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the compute DMA queues and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v4_4_2_rlc_resume(struct amdgpu_device *adev)
+{
+ sdma_v4_4_2_init_pg(adev);
+
+ return 0;
+}
+
+/**
+ * sdma_v4_4_2_load_microcode - load the sDMA ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the sDMA0/1 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev)
+{
+ const struct sdma_firmware_header_v1_0 *hdr;
+ const __le32 *fw_data;
+ u32 fw_size;
+ int i, j;
+
+ /* halt the MEs */
+ sdma_v4_4_2_enable(adev, false);
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (!adev->sdma.instance[i].fw)
+ return -EINVAL;
+
+ hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+ amdgpu_ucode_print_sdma_hdr(&hdr->header);
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+
+ fw_data = (const __le32 *)
+ (adev->sdma.instance[i].fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ WREG32_SDMA(i, regSDMA_UCODE_ADDR, 0);
+
+ for (j = 0; j < fw_size; j++)
+ WREG32_SDMA(i, regSDMA_UCODE_DATA,
+ le32_to_cpup(fw_data++));
+
+ WREG32_SDMA(i, regSDMA_UCODE_ADDR,
+ adev->sdma.instance[i].fw_version);
+ }
+
+ return 0;
+}
+
+/**
+ * sdma_v4_4_2_start - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the DMA engines and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v4_4_2_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i, r = 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_4_2_ctx_switch_enable(adev, false);
+ sdma_v4_4_2_enable(adev, false);
+ } else {
+ /* bypass sdma microcode loading on Gopher */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
+ !(adev->pdev->device == 0x49) && !(adev->pdev->device == 0x50)) {
+ r = sdma_v4_4_2_load_microcode(adev);
+ if (r)
+ return r;
+ }
+
+ /* unhalt the MEs */
+ sdma_v4_4_2_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v4_4_2_ctx_switch_enable(adev, true);
+ }
+
+ /* start the gfx rings and rlc compute queues */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ uint32_t temp;
+
+ WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+ sdma_v4_4_2_gfx_resume(adev, i);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_4_2_page_resume(adev, i);
+
+		/* always set the UTC L1 enable flag to 1 */
+ temp = RREG32_SDMA(i, regSDMA_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+
+ /* unhalt engine */
+ temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
+ WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_4_2_ctx_switch_enable(adev, true);
+ sdma_v4_4_2_enable(adev, true);
+ } else {
+ r = sdma_v4_4_2_rlc_resume(adev);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+ r = amdgpu_ring_test_helper(page);
+ if (r)
+ return r;
+
+ if (adev->mman.buffer_funcs_ring == page)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ return r;
+}
+
+/**
+ * sdma_v4_4_2_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value to memory.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned i;
+ unsigned index;
+ int r;
+ u32 tmp;
+ u64 gpu_addr;
+
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
+ r = amdgpu_ring_alloc(ring, 5);
+ if (r)
+ goto error_free_wb;
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
+ amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
+ amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
+ return r;
+}
+
+/**
+ * sdma_v4_4_2_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ * Test a simple IB in the DMA ring.
+ * Returns 0 on success, error on failure.
+ */
+static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct dma_fence *f = NULL;
+ unsigned index;
+ long r;
+ u32 tmp = 0;
+ u64 gpu_addr;
+
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err0;
+
+ ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+ ib.ptr[1] = lower_32_bits(gpu_addr);
+ ib.ptr[2] = upper_32_bits(gpu_addr);
+ ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+ ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+ ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
+ ib.length_dw = 8;
+
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ if (r)
+ goto err1;
+
+ r = dma_fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ r = -ETIMEDOUT;
+ goto err1;
+ } else if (r < 0) {
+ goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF)
+ r = 0;
+ else
+ r = -EINVAL;
+
+err1:
+ amdgpu_ib_free(adev, &ib, NULL);
+ dma_fence_put(f);
+err0:
+ amdgpu_device_wb_free(adev, index);
+ return r;
+}
+
+
+/**
+ * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA.
+ */
+static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ unsigned bytes = count * 8;
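+	/* each GART page table entry is 8 bytes */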
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = bytes - 1;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+}
+
+/**
+ * sdma_v4_4_2_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using sDMA.
+ */
+static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
+{
+ unsigned ndw = count * 2;
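+	/* each 64-bit PTE takes two dwords in the packet */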
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw - 1;
+ for (; ndw > 0; ndw -= 2) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
+ }
+}
+
+/**
+ * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA.
+ */
+static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint64_t flags)
+{
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
+ ib->ptr[ib->length_dw++] = upper_32_bits(flags);
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
+}
+
+/**
+ * sdma_v4_4_2_ring_pad_ib - pad the IB to the required number of dw
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill with padding
+ */
+static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
+ u32 pad_count;
+ int i;
+
+ pad_count = (-ib->length_dw) & 7;
+ for (i = 0; i < pad_count; i++)
+ if (sdma && sdma->burst_nop && (i == 0))
+ ib->ptr[ib->length_dw++] =
+ SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+ SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+ else
+ ib->ptr[ib->length_dw++] =
+ SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+}
+
+
+/**
+ * sdma_v4_4_2_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed.
+ */
+static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ sdma_v4_4_2_wait_reg_mem(ring, 1, 0,
+ addr & 0xfffffffc,
+ upper_32_bits(addr) & 0xffffffff,
+ seq, 0xffffffff, 4);
+}
+
+
+/**
+ * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vmid: vmid number to use
+ * @pd_addr: page table base address
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA.
+ */
+static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+}
+
+static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, val);
+}
+
+static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ sdma_v4_4_2_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
+}
+
+static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
+{
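+	/* the paging queue is not enabled on any SDMA 4.4.2 variant yet */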
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 4, 2):
+ return false;
+ default:
+ return false;
+ }
+}
+
+static int sdma_v4_4_2_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = sdma_v4_4_2_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load sdma firmware!\n");
+ return r;
+ }
+
+ /* TODO: Page queue breaks driver reload under SRIOV */
+ if (sdma_v4_4_2_fw_support_paging_queue(adev))
+ adev->sdma.has_page_queue = true;
+
+ sdma_v4_4_2_set_ring_funcs(adev);
+ sdma_v4_4_2_set_buffer_funcs(adev);
+ sdma_v4_4_2_set_vm_pte_funcs(adev);
+ sdma_v4_4_2_set_irq_funcs(adev);
+
+ return 0;
+}
+
+#if 0
+static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+#endif
+
+static int sdma_v4_4_2_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+#if 0
+ struct ras_ih_if ih_info = {
+ .cb = sdma_v4_4_2_process_ras_data_cb,
+ };
+#endif
+ if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
+ if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
+ adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
+ adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
+ }
+
+ return 0;
+}
+
+static int sdma_v4_4_2_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ int r, i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* SDMA trap event */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_TRAP,
+ &adev->sdma.trap_irq);
+ if (r)
+ return r;
+ }
+
+ /* SDMA SRAM ECC event */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
+ &adev->sdma.ecc_irq);
+ if (r)
+ return r;
+ }
+
+	/* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION events */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_VM_HOLE,
+ &adev->sdma.vm_hole_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
+ &adev->sdma.doorbell_invalid_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
+ &adev->sdma.pool_timeout_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
+ &adev->sdma.srbm_write_irq);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+
+ DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
+ ring->use_doorbell?"true":"false");
+
+ /* doorbell size is 2 dwords, get DWORD offset */
+ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+
+ sprintf(ring->name, "sdma%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
+ if (adev->sdma.has_page_queue) {
+ ring = &adev->sdma.instance[i].page;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+
+ /* paging queue use same doorbell index/routing as gfx queue
+ * with 0x400 (4096 dwords) offset on second doorbell page
+ */
+ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+ ring->doorbell_index += 0x400;
+
+ sprintf(ring->name, "page%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024,
+ &adev->sdma.trap_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+ }
+ }
+
+ return r;
+}
+
+static int sdma_v4_4_2_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ if (adev->sdma.has_page_queue)
+ amdgpu_ring_fini(&adev->sdma.instance[i].page);
+ }
+
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2))
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
+ else
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->flags & AMD_IS_APU)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+
+ if (!amdgpu_sriov_vf(adev))
+ sdma_v4_4_2_init_golden_registers(adev);
+
+ r = sdma_v4_4_2_start(adev);
+
+ return r;
+}
+
+static int sdma_v4_4_2_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
+
+ sdma_v4_4_2_ctx_switch_enable(adev, false);
+ sdma_v4_4_2_enable(adev, false);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return sdma_v4_4_2_hw_fini(adev);
+}
+
+static int sdma_v4_4_2_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return sdma_v4_4_2_hw_init(adev);
+}
+
+static bool sdma_v4_4_2_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ u32 tmp = RREG32_SDMA(i, regSDMA_STATUS_REG);
+
+ if (!(tmp & SDMA_STATUS_REG__IDLE_MASK))
+ return false;
+ }
+
+ return true;
+}
+
+static int sdma_v4_4_2_wait_for_idle(void *handle)
+{
+ unsigned i, j;
+ u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ for (j = 0; j < adev->sdma.num_instances; j++) {
+ sdma[j] = RREG32_SDMA(j, regSDMA_STATUS_REG);
+ if (!(sdma[j] & SDMA_STATUS_REG__IDLE_MASK))
+ break;
+ }
+ if (j == adev->sdma.num_instances)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int sdma_v4_4_2_soft_reset(void *handle)
+{
+ /* todo */
+
+ return 0;
+}
+
+static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_cntl;
+
+ sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, TRAP_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t instance;
+
+ DRM_DEBUG("IH: SDMA trap\n");
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ switch (entry->ring_id) {
+ case 0:
+ amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+#if 0
+static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+
+ /* When “Full RAS” is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ goto out;
+
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
+ goto out;
+
+ amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
+
+out:
+ return AMDGPU_RAS_SUCCESS;
+}
+#endif
+
+static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+
+ DRM_ERROR("Illegal instruction in SDMA command stream\n");
+
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
+ return 0;
+
+ switch (entry->ring_id) {
+ case 0:
+ drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
+ break;
+ }
+ return 0;
+}
+
+static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_edc_config;
+
+ sdma_edc_config = RREG32_SDMA(type, regCC_SDMA_EDC_CONFIG);
+ /*
+ * FIXME: This was inherited from Aldebaran, but this field is not
+ * defined in the regspec of either Aldebaran or SDMA 4.4.2
+ */
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ sdma_edc_config |= (1 << 2);
+ else
+ sdma_edc_config &= ~(1 << 2);
+ WREG32_SDMA(type, regCC_SDMA_EDC_CONFIG, sdma_edc_config);
+
+ return 0;
+}
+
+static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+ struct amdgpu_task_info task_info;
+ u64 addr;
+
+ instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+ if (instance < 0 || instance >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance invalid %d\n", instance);
+ return -EINVAL;
+ }
+
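+ /* Reconstruct the 48-bit fault address: src_data[0] carries the
+ * page-aligned bits 12..43 and the low nibble of src_data[1]
+ * carries bits 44..47.
+ */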
+ addr = (u64)entry->src_data[0] << 12;
+ addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+
+ memset(&task_info, 0, sizeof(struct amdgpu_task_info));
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+ dev_dbg_ratelimited(adev->dev,
+ "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
+ "pasid:%u, for process %s pid %d thread %s pid %d\n",
+ instance, addr, entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_vm_hole_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_doorbell_invalid_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable != 0xff\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_pool_timeout_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg_ratelimited(adev->dev,
+ "Polling register/memory timeout executing POLL_REG/MEM with finite timer\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg_ratelimited(adev->dev,
+ "SDMA gets an Register Write SRBM_WRITE command in non-privilege command buffer\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static void sdma_v4_4_2_update_medium_grain_clock_gating(
+ struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def;
+ int i;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
+ data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
+ }
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
+ data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+ SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
+ }
+ }
+}
+
+static void sdma_v4_4_2_update_medium_grain_light_sleep(
+ struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def;
+ int i;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ /* 1-not override: enable sdma mem light sleep */
+ def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
+ data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
+ }
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ /* 0-override: disable sdma mem light sleep */
+ def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
+ data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
+ }
+ }
+}
+
+static int sdma_v4_4_2_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ sdma_v4_4_2_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ sdma_v4_4_2_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ return 0;
+}
+
+static int sdma_v4_4_2_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ /* AMD_CG_SUPPORT_SDMA_MGCG */
+ data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_CLK_CTRL));
+ if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK))
+ *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
+
+ /* AMD_CG_SUPPORT_SDMA_LS */
+ data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_POWER_CNTL));
+ if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
+ *flags |= AMD_CG_SUPPORT_SDMA_LS;
+}
+
+const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
+ .name = "sdma_v4_4_2",
+ .early_init = sdma_v4_4_2_early_init,
+ .late_init = sdma_v4_4_2_late_init,
+ .sw_init = sdma_v4_4_2_sw_init,
+ .sw_fini = sdma_v4_4_2_sw_fini,
+ .hw_init = sdma_v4_4_2_hw_init,
+ .hw_fini = sdma_v4_4_2_hw_fini,
+ .suspend = sdma_v4_4_2_suspend,
+ .resume = sdma_v4_4_2_resume,
+ .is_idle = sdma_v4_4_2_is_idle,
+ .wait_for_idle = sdma_v4_4_2_wait_for_idle,
+ .soft_reset = sdma_v4_4_2_soft_reset,
+ .set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
+ .set_powergating_state = sdma_v4_4_2_set_powergating_state,
+ .get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
+};
+
+static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = sdma_v4_4_2_ring_get_rptr,
+ .get_wptr = sdma_v4_4_2_ring_get_wptr,
+ .set_wptr = sdma_v4_4_2_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
+ /* sdma_v4_4_2_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
+ .emit_ib = sdma_v4_4_2_ring_emit_ib,
+ .emit_fence = sdma_v4_4_2_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_4_2_ring_test_ring,
+ .test_ib = sdma_v4_4_2_ring_test_ib,
+ .insert_nop = sdma_v4_4_2_ring_insert_nop,
+ .pad_ib = sdma_v4_4_2_ring_pad_ib,
+ .emit_wreg = sdma_v4_4_2_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = sdma_v4_4_2_ring_get_rptr,
+ .get_wptr = sdma_v4_4_2_page_ring_get_wptr,
+ .set_wptr = sdma_v4_4_2_page_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
+ /* sdma_v4_4_2_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
+ .emit_ib = sdma_v4_4_2_ring_emit_ib,
+ .emit_fence = sdma_v4_4_2_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_4_2_ring_test_ring,
+ .test_ib = sdma_v4_4_2_ring_test_ib,
+ .insert_nop = sdma_v4_4_2_ring_insert_nop,
+ .pad_ib = sdma_v4_4_2_ring_pad_ib,
+ .emit_wreg = sdma_v4_4_2_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ if (adev->sdma.has_page_queue) {
+ adev->sdma.instance[i].page.funcs =
+ &sdma_v4_4_2_page_ring_funcs;
+ adev->sdma.instance[i].page.me = i;
+ }
+ }
+}
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_trap_irq_funcs = {
+ .set = sdma_v4_4_2_set_trap_irq_state,
+ .process = sdma_v4_4_2_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_illegal_inst_irq_funcs = {
+ .process = sdma_v4_4_2_process_illegal_inst_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ecc_irq_funcs = {
+ .set = sdma_v4_4_2_set_ecc_irq_state,
+ .process = amdgpu_sdma_process_ecc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_vm_hole_irq_funcs = {
+ .process = sdma_v4_4_2_process_vm_hole_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_doorbell_invalid_irq_funcs = {
+ .process = sdma_v4_4_2_process_doorbell_invalid_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_pool_timeout_irq_funcs = {
+ .process = sdma_v4_4_2_process_pool_timeout_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
+ .process = sdma_v4_4_2_process_srbm_write_irq,
+};
+
+static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
+
+ adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
+ adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
+ adev->sdma.ecc_irq.funcs = &sdma_v4_4_2_ecc_irq_funcs;
+ adev->sdma.vm_hole_irq.funcs = &sdma_v4_4_2_vm_hole_irq_funcs;
+ adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
+ adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
+ adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
+}
+
+/**
+ * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to copy to
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ * @tmz: if a secure copy should be used
+ *
+ * Copy GPU buffers using the DMA engine.
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ uint32_t byte_count,
+ bool tmz)
+{
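+ /* 7-dword LINEAR copy packet: header, count - 1, parameter, src addr lo/hi, dst addr lo/hi */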
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+ ib->ptr[ib->length_dw++] = byte_count - 1;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+}
+
+/**
+ * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to copy to
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine.
+ */
+static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib,
+ uint32_t src_data,
+ uint64_t dst_offset,
+ uint32_t byte_count)
+{
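+ /* 5-dword CONST_FILL packet: header, dst addr lo/hi, fill value, count - 1 */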
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = src_data;
+ ib->ptr[ib->length_dw++] = byte_count - 1;
+}
+
+static const struct amdgpu_buffer_funcs sdma_v4_4_2_buffer_funcs = {
+ .copy_max_bytes = 0x400000,
+ .copy_num_dw = 7,
+ .emit_copy_buffer = sdma_v4_4_2_emit_copy_buffer,
+
+ .fill_max_bytes = 0x400000,
+ .fill_num_dw = 5,
+ .emit_fill_buffer = sdma_v4_4_2_emit_fill_buffer,
+};
+
+static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
+{
+ adev->mman.buffer_funcs = &sdma_v4_4_2_buffer_funcs;
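+ /* Route TTM buffer moves through the page queue when one exists */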
+ if (adev->sdma.has_page_queue)
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+ else
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+}
+
+static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
+ .copy_pte = sdma_v4_4_2_vm_copy_pte,
+
+ .write_pte = sdma_v4_4_2_vm_write_pte,
+ .set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
+};
+
+static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+ struct drm_gpu_scheduler *sched;
+ unsigned i;
+
+ adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
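+ /* Run page-table updates on the page queue when available, otherwise on the SDMA ring */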
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.has_page_queue)
+ sched = &adev->sdma.instance[i].page.sched;
+ else
+ sched = &adev->sdma.instance[i].ring.sched;
+ adev->vm_manager.vm_pte_scheds[i] = sched;
+ }
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
+}
+
+const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 4,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v4_4_2_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h
index c26e7258a91c..4814e8a074d6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,9 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-// TODO - remove this file after external build dependencies is resolved.
-/* NOTE: This file is pending to be removed, do not add new code to this file */ \ No newline at end of file
+#ifndef __SDMA_V4_4_2_H__
+#define __SDMA_V4_4_2_H__
+
+extern const struct amd_ip_funcs sdma_v4_4_2_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 2eddd7f6cd41..7d04c39332ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -191,47 +191,6 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
}
}
-/*
- * Indirect registers accessor
- */
-static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg(adev, address, data, reg);
-}
-
-static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg(adev, address, data, reg, v);
-}
-
-static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg64(adev, address, data, reg);
-}
-
-static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
-}
-
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
@@ -651,24 +610,6 @@ static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk
return 0;
}
-static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void soc15_program_aspm(struct amdgpu_device *adev)
{
if (!amdgpu_device_should_use_aspm(adev))
@@ -695,11 +636,6 @@ const struct amdgpu_ip_block_version vega10_common_ip_block =
.funcs = &soc15_common_ip_funcs,
};
-static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
-{
- return adev->nbio.funcs->get_rev_id(adev);
-}
-
static void soc15_reg_base_init(struct amdgpu_device *adev)
{
/* Set IP register base before any HW register access */
@@ -936,10 +872,10 @@ static int soc15_common_early_init(void *handle)
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
- adev->pcie_rreg = &soc15_pcie_rreg;
- adev->pcie_wreg = &soc15_pcie_wreg;
- adev->pcie_rreg64 = &soc15_pcie_rreg64;
- adev->pcie_wreg64 = &soc15_pcie_wreg64;
+ adev->pcie_rreg = &amdgpu_device_indirect_rreg;
+ adev->pcie_wreg = &amdgpu_device_indirect_wreg;
+ adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
+ adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
adev->didt_rreg = &soc15_didt_rreg;
@@ -949,7 +885,7 @@ static int soc15_common_early_init(void *handle)
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
- adev->rev_id = soc15_get_rev_id(adev);
+ adev->rev_id = amdgpu_device_get_rev_id(adev);
adev->external_rev_id = 0xFF;
/* TODO: split the GC and PG flags based on the relevant IP version for which
* they are relevant.
@@ -1230,8 +1166,6 @@ static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* enable pcie gen2/3 link */
- soc15_pcie_gen3_enable(adev);
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 061793d390cc..67580761b44d 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -102,6 +102,59 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
};
+/* SRIOV SOC21, not const since data is controlled by host */
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn0 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = {
+ .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1),
+ .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+};
+
static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
@@ -112,62 +165,37 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 2):
case IP_VERSION(4, 0, 4):
- if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
- if (encode)
- *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
- else
- *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+ if (amdgpu_sriov_vf(adev)) {
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+ !amdgpu_sriov_is_av1_support(adev)) {
+ if (encode)
+ *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn1;
+ else
+ *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn0;
+ else
+ *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn0;
+ }
} else {
- if (encode)
- *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
- else
- *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) {
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+ }
}
return 0;
default:
return -EINVAL;
}
}
-/*
- * Indirect registers accessor
- */
-static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg(adev, address, data, reg);
-}
-
-static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg(adev, address, data, reg, v);
-}
-
-static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
-{
- unsigned long address, data;
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- return amdgpu_device_indirect_rreg64(adev, address, data, reg);
-}
-
-static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
-{
- unsigned long address, data;
-
- address = adev->nbio.funcs->get_pcie_index_offset(adev);
- data = adev->nbio.funcs->get_pcie_data_offset(adev);
-
- amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
-}
static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
@@ -412,21 +440,6 @@ static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk
return 0;
}
-static void soc21_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void soc21_program_aspm(struct amdgpu_device *adev)
{
if (!amdgpu_device_should_use_aspm(adev))
@@ -453,11 +466,6 @@ const struct amdgpu_ip_block_version soc21_common_ip_block =
.funcs = &soc21_common_ip_funcs,
};
-static uint32_t soc21_get_rev_id(struct amdgpu_device *adev)
-{
- return adev->nbio.funcs->get_rev_id(adev);
-}
-
static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
@@ -582,10 +590,10 @@ static int soc21_common_early_init(void *handle)
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
- adev->pcie_rreg = &soc21_pcie_rreg;
- adev->pcie_wreg = &soc21_pcie_wreg;
- adev->pcie_rreg64 = &soc21_pcie_rreg64;
- adev->pcie_wreg64 = &soc21_pcie_wreg64;
+ adev->pcie_rreg = &amdgpu_device_indirect_rreg;
+ adev->pcie_wreg = &amdgpu_device_indirect_wreg;
+ adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
+ adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
@@ -598,7 +606,7 @@ static int soc21_common_early_init(void *handle)
adev->asic_funcs = &soc21_asic_funcs;
- adev->rev_id = soc21_get_rev_id(adev);
+ adev->rev_id = amdgpu_device_get_rev_id(adev);
adev->external_rev_id = 0xff;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
@@ -730,8 +738,23 @@ static int soc21_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
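+ /* Pick the SR-IOV codec tables matching VCN harvesting and AV1 support */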
+ if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+ !amdgpu_sriov_is_av1_support(adev)) {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+ sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1));
+ } else {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+ sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+ ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0));
+ }
+ }
return 0;
}
@@ -755,8 +778,6 @@ static int soc21_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* enable pcie gen2/3 link */
- soc21_pcie_gen3_enable(adev);
/* enable aspm */
soc21_program_aspm(adev);
/* setup nbio registers */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index b0b0e69c6a94..223e7dfe4618 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -225,6 +225,10 @@ static int vcn_v2_5_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -2031,6 +2035,4 @@ static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- amdgpu_vcn_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 43d587404c3e..720ab36f9c92 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -181,6 +181,10 @@ static int vcn_v4_0_sw_init(void *handle)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -2123,6 +2127,4 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
- amdgpu_vcn_set_ras_funcs(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 1706081d054d..827e2768f867 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -321,7 +321,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
/* psp firmware won't program IH_CHICKEN for aldebaran
* driver needs to program it properly according to
* MC_SPACE type in IH_RB_CNTL */
- if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) {
+ if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) ||
+ (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) {
ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);
if (adev->irq.ih.use_bus_addr) {
ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
@@ -551,12 +552,14 @@ static int vega20_ih_sw_init(void *handle)
adev->irq.ih1.use_doorbell = true;
adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
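+ /* IH ring 2 is not used on OSSSYS 4.4.2 */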
+ if (adev->ip_versions[OSSSYS_HWIP][0] != IP_VERSION(4, 4, 2)) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ }
/* initialize ih control registers offset */
vega20_ih_init_register_offset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 12ef782eb478..2512b70ea992 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1105,24 +1105,6 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
return 0;
}
-static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
-{
- if (pci_is_root_bus(adev->pdev->bus))
- return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
- return;
-
- /* todo */
-}
-
static void vi_enable_aspm(struct amdgpu_device *adev)
{
u32 data, orig;
@@ -1743,8 +1725,6 @@ static int vi_common_hw_init(void *handle)
/* move the golden regs per IP block */
vi_init_golden_registers(adev);
- /* enable pcie gen2/3 link */
- vi_pcie_gen3_enable(adev);
/* enable aspm */
vi_program_aspm(adev);
/* enable the doorbell aperture */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index a0e30f21e12e..81d07ecf666d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1312,14 +1312,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
-
err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
+ mutex_unlock(&p->mutex);
+
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1335,9 +1335,9 @@ get_process_device_data_failed:
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
+sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
-sync_memory_failed:
kfree(devices_arr);
return err;
@@ -1351,6 +1351,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
void *mem;
long err = 0;
uint32_t *devices_arr = NULL, i;
+ bool flush_tlb;
if (!args->n_devices) {
pr_debug("Device IDs array empty\n");
@@ -1403,16 +1404,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
}
args->n_success = i+1;
}
- mutex_unlock(&p->mutex);
- if (kfd_flush_tlb_after_unmap(pdd->dev)) {
+ flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
+ if (flush_tlb) {
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
(struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
+ }
+ mutex_unlock(&p->mutex);
+ if (flush_tlb) {
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1428,9 +1432,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
+sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
-sync_memory_failed:
kfree(devices_arr);
return err;
}
@@ -1586,6 +1590,58 @@ err_unlock:
return r;
}
+static int kfd_ioctl_export_dmabuf(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_export_dmabuf_args *args = data;
+ struct kfd_process_device *pdd;
+ struct dma_buf *dmabuf;
+ struct kfd_dev *dev;
+ void *mem;
+ int ret = 0;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ mem = kfd_process_device_translate_handle(pdd,
+ GET_IDR_HANDLE(args->handle));
+ if (!mem) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
+ mutex_unlock(&p->mutex);
+ if (ret)
+ goto err_out;
+
+ ret = dma_buf_fd(dmabuf, args->flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ goto err_out;
+ }
+ /* dma_buf_fd() transfers the dmabuf reference to the fd, so there
+ * is no need to put the reference here.
+ */
+ args->dmabuf_fd = ret;
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&p->mutex);
+err_out:
+ return ret;
+}
+
/* Handle requests for watching SMI events */
static int kfd_ioctl_smi_events(struct file *filep,
struct kfd_process *p, void *data)
@@ -2768,6 +2824,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
kfd_ioctl_get_available_memory, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
+ kfd_ioctl_export_dmabuf, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3de7f616a001..ec70a1658dc3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -59,6 +59,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+static int kfd_resume_iommu(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
@@ -624,7 +625,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
svm_migrate_init(kfd->adev);
- if (kgd2kfd_resume_iommu(kfd))
+ if (kfd_resume_iommu(kfd))
goto device_iommu_error;
if (kfd_resume(kfd))
@@ -773,6 +774,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
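+ /* Nothing to resume if device init never completed */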
+ if (!kfd->init_complete)
+ return 0;
+
+ return kfd_resume_iommu(kfd);
+}
+
+static int kfd_resume_iommu(struct kfd_dev *kfd)
+{
int err = 0;
err = kfd_iommu_resume(kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index de8ce72344fc..54933903bcb8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -289,7 +289,7 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch)
+ dma_addr_t *scratch, uint64_t ttm_res_offset)
{
uint64_t npages = migrate->npages;
struct device *dev = adev->dev;
@@ -299,19 +299,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t i, j;
int r;
- pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
- prange->last);
+ pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
+ prange->last, ttm_res_offset);
src = scratch;
dst = (uint64_t *)(scratch + npages);
- r = svm_range_vram_node_new(adev, prange, true);
- if (r) {
- dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
- goto out;
- }
-
- amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+ amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
for (i = j = 0; i < npages; i++) {
struct page *spage;
@@ -391,14 +385,14 @@ out_free_vram_pages:
migrate->dst[i + 3] = 0;
}
#endif
-out:
+
return r;
}
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
- uint64_t end, uint32_t trigger)
+ uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
@@ -451,7 +445,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
else
pr_debug("0x%lx pages migrated\n", cpages);
- r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+ r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
@@ -499,6 +493,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long addr, start, end;
struct vm_area_struct *vma;
struct amdgpu_device *adev;
+ uint64_t ttm_res_offset;
unsigned long cpages = 0;
long r = 0;
@@ -520,6 +515,13 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
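+ /* Allocate the VRAM backing once up front; the per-VMA copies then advance ttm_res_offset through it */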
+ r = svm_range_vram_node_new(adev, prange, true);
+ if (r) {
+ dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
+ return r;
+ }
+ ttm_res_offset = prange->offset << PAGE_SHIFT;
+
for (addr = start; addr < end;) {
unsigned long next;
@@ -528,18 +530,21 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
break;
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
+ r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
if (r < 0) {
pr_debug("failed %ld to migrate\n", r);
break;
} else {
cpages += r;
}
+ ttm_res_offset += next - addr;
addr = next;
}
if (cpages)
prange->actual_loc = best_loc;
+ else
+ svm_range_vram_node_free(prange);
return r < 0 ? r : 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 09b966dc3768..aee2212e52f6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -77,6 +77,7 @@ err_ioctl:
static void kfd_exit(void)
{
+ kfd_cleanup_processes();
kfd_debugfs_fini();
kfd_process_destroy_wq();
kfd_procfs_shutdown();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index bfa30d12406b..7e4d992e48b3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -928,6 +928,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev);
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
+void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7acd55a814b2..07a9eaf9b7d8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -344,7 +344,7 @@ static const struct sysfs_ops kfd_procfs_ops = {
.show = kfd_procfs_show,
};
-static struct kobj_type procfs_type = {
+static const struct kobj_type procfs_type = {
.release = kfd_procfs_kobj_release,
.sysfs_ops = &kfd_procfs_ops,
};
@@ -469,7 +469,7 @@ static const struct sysfs_ops procfs_queue_ops = {
.show = kfd_procfs_queue_show,
};
-static struct kobj_type procfs_queue_type = {
+static const struct kobj_type procfs_queue_type = {
.sysfs_ops = &procfs_queue_ops,
.default_groups = procfs_queue_groups,
};
@@ -478,7 +478,7 @@ static const struct sysfs_ops procfs_stats_ops = {
.show = kfd_procfs_stats_show,
};
-static struct kobj_type procfs_stats_type = {
+static const struct kobj_type procfs_stats_type = {
.sysfs_ops = &procfs_stats_ops,
.release = kfd_procfs_kobj_release,
};
@@ -487,7 +487,7 @@ static const struct sysfs_ops sysfs_counters_ops = {
.show = kfd_sysfs_counters_show,
};
-static struct kobj_type sysfs_counters_type = {
+static const struct kobj_type sysfs_counters_type = {
.sysfs_ops = &sysfs_counters_ops,
.release = kfd_procfs_kobj_release,
};
@@ -1167,6 +1167,17 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
+static void kfd_process_notifier_release_internal(struct kfd_process *p)
+{
+ cancel_delayed_work_sync(&p->eviction_work);
+ cancel_delayed_work_sync(&p->restore_work);
+
+ /* Indicate to other users that MM is no longer valid */
+ p->mm = NULL;
+
+ mmu_notifier_put(&p->mmu_notifier);
+}
+
static void kfd_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
@@ -1181,17 +1192,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
return;
mutex_lock(&kfd_processes_mutex);
+ /*
+ * Return early if the table is empty.
+ *
+ * This can happen if this function is called concurrently by the
+ * mmu_notifier and by kfd_cleanup_processes.
+ */
+ if (hash_empty(kfd_processes_table)) {
+ mutex_unlock(&kfd_processes_mutex);
+ return;
+ }
hash_del_rcu(&p->kfd_processes);
mutex_unlock(&kfd_processes_mutex);
synchronize_srcu(&kfd_processes_srcu);
- cancel_delayed_work_sync(&p->eviction_work);
- cancel_delayed_work_sync(&p->restore_work);
-
- /* Indicate to other users that MM is no longer valid */
- p->mm = NULL;
-
- mmu_notifier_put(&p->mmu_notifier);
+ kfd_process_notifier_release_internal(p);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
@@ -1200,6 +1216,43 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.free_notifier = kfd_process_free_notifier,
};
+/*
+ * This code handles the case when the driver is unloaded before all
+ * mm_structs are released. We need to safely free the kfd_process
+ * structures and avoid races with the mmu_notifier, which might also
+ * try to free them.
+ */
+void kfd_cleanup_processes(void)
+{
+ struct kfd_process *p;
+ struct hlist_node *p_temp;
+ unsigned int temp;
+ HLIST_HEAD(cleanup_list);
+
+ /*
+ * Move all remaining kfd_processes from the process table to a
+ * temporary list for processing. Once done, the mmu_notifier release
+ * callback will not find the kfd_process in the table and will return
+ * early, avoiding a double free.
+ */
+ mutex_lock(&kfd_processes_mutex);
+ hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
+ hash_del_rcu(&p->kfd_processes);
+ synchronize_srcu(&kfd_processes_srcu);
+ hlist_add_head(&p->kfd_processes, &cleanup_list);
+ }
+ mutex_unlock(&kfd_processes_mutex);
+
+ hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
+ kfd_process_notifier_release_internal(p);
+
+ /*
+ * Ensure that all outstanding free_notifier callbacks are invoked,
+ * triggering the release of the kfd_process structs.
+ */
+ mmu_notifier_synchronize();
+}
+
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 5137476ec18e..4236539d9f93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -218,8 +218,8 @@ static int init_user_queue(struct process_queue_manager *pqm,
return 0;
cleanup:
- if (dev->shared_resources.enable_mes)
- uninit_queue(*q);
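+ /* Always free the partially initialized queue and clear the caller's pointer to avoid a double free */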
+ uninit_queue(*q);
+ *q = NULL;
return retval;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 3fdaba56be6f..8e4124dcb6e4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -278,7 +278,7 @@ static const struct sysfs_ops sysprops_ops = {
.show = sysprops_show,
};
-static struct kobj_type sysprops_type = {
+static const struct kobj_type sysprops_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &sysprops_ops,
};
@@ -318,7 +318,7 @@ static const struct sysfs_ops iolink_ops = {
.show = iolink_show,
};
-static struct kobj_type iolink_type = {
+static const struct kobj_type iolink_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &iolink_ops,
};
@@ -350,7 +350,7 @@ static const struct sysfs_ops mem_ops = {
.show = mem_show,
};
-static struct kobj_type mem_type = {
+static const struct kobj_type mem_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &mem_ops,
};
@@ -395,7 +395,7 @@ static const struct sysfs_ops cache_ops = {
.show = kfd_cache_show,
};
-static struct kobj_type cache_type = {
+static const struct kobj_type cache_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &cache_ops,
};
@@ -566,7 +566,7 @@ static const struct sysfs_ops node_ops = {
.show = node_show,
};
-static struct kobj_type node_type = {
+static const struct kobj_type node_type = {
.release = kfd_topology_kobj_release,
.sysfs_ops = &node_ops,
};
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 0c9bd0a53e60..06b438217c61 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -8,7 +8,7 @@ config DRM_AMD_DC
depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
- select DRM_AMD_DC_DCN if (X86 || PPC_LONG_DOUBLE_128 || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
+ select DRM_AMD_DC_FP if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
@@ -20,16 +20,10 @@ config DRM_AMD_DC
panic on most architectures. We'll revert this when the following bug report
has been resolved: https://github.com/llvm/llvm-project/issues/41896.
-config DRM_AMD_DC_DCN
+config DRM_AMD_DC_FP
def_bool n
help
- Raven, Navi, and newer family support for display engine
-
-config DRM_AMD_DC_HDCP
- bool "Enable HDCP support in DC"
- depends on DRM_AMD_DC
- help
- Choose this option if you want to support HDCP authentication.
+ Floating point support, required for DCN-based SoCs
config DRM_AMD_DC_SI
bool "AMD DC support for Southern Islands ASICs"
@@ -50,7 +44,7 @@ config DEBUG_KERNEL_DC
config DRM_AMD_SECURE_DISPLAY
bool "Enable secure display support"
depends on DEBUG_FS
- depends on DRM_AMD_DC_DCN
+ depends on DRM_AMD_DC_FP
help
Choose this option if you want to
support secure display
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 2633de77de5e..0d610cb376bb 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -36,18 +36,14 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc
-ifdef CONFIG_DRM_AMD_DC_HDCP
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp
-endif
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power dmub/src
-ifdef CONFIG_DRM_AMD_DC_HDCP
DAL_LIBS += modules/hdcp
-endif
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 90fb0f3cdb6f..249b073f6a23 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -33,7 +33,7 @@ AMDGPUDM = \
amdgpu_dm_mst_types.o \
amdgpu_dm_color.o
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
endif
@@ -41,9 +41,7 @@ ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o
endif
-ifdef CONFIG_DRM_AMD_DC_HDCP
AMDGPUDM += amdgpu_dm_hdcp.o
-endif
ifneq ($(CONFIG_DEBUG_FS),)
AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 009ef917dad4..eeaeca8b51f4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -52,10 +52,8 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
-#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
@@ -344,12 +342,52 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
{
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
- else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
+ else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
return true;
else
return false;
}
+static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
+ int planes_count)
+{
+ int i, j;
+
+ for (i = 0, j = planes_count - 1; i < j; i++, j--)
+ swap(array_of_surface_update[i], array_of_surface_update[j]);
+}
+
+/**
+ * update_planes_and_stream_adapter() - Send planes to be updated in DC
+ *
+ * @dc: Display Core control structure
+ * @update_type: specify whether it is FULL/MEDIUM/FAST update
+ * @planes_count: number of planes to update
+ * @stream: stream state
+ * @stream_update: stream update
+ * @array_of_surface_update: dc surface update pointer
+ *
+ * DC has a generic way to update planes and stream via the
+ * dc_update_planes_and_stream() function; however, DM might need some
+ * adjustments and preparation before calling it. This function is a wrapper
+ * for dc_update_planes_and_stream() that does any required configuration
+ * before passing control to DC.
+ *
+ * Return: the result of dc_update_planes_and_stream()
+ */
+static inline bool update_planes_and_stream_adapter(struct dc *dc,
+ int update_type,
+ int planes_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_surface_update *array_of_surface_update)
+{
+ reverse_planes_order(array_of_surface_update, planes_count);
+
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
+ return dc_update_planes_and_stream(dc,
+ array_of_surface_update,
+ planes_count,
+ stream,
+ stream_update);
+}
+
/**
* dm_pflip_high_irq() - Handle pageflip interrupt
* @interrupt_params: ignored
@@ -394,7 +432,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
WARN_ON(!e);
- vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
/* Fixed refresh rate, or VRR scanout position outside front-porch? */
if (!vrr_active ||
@@ -468,7 +506,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
if (acrtc) {
- vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
drm_dev = acrtc->base.dev;
vblank = &drm_dev->vblank[acrtc->base.index];
previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
@@ -492,7 +530,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* if a pageflip happened inside front-porch.
*/
if (vrr_active) {
- dm_crtc_handle_vblank(acrtc);
+ amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
if (acrtc->dm_irq_params.stream &&
@@ -532,7 +570,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
if (!acrtc)
return;
- vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
vrr_active, acrtc->dm_irq_params.active_planes);
@@ -544,7 +582,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
* to dm_vupdate_high_irq after end of front-porch.
*/
if (!vrr_active)
- dm_crtc_handle_vblank(acrtc);
+ amdgpu_dm_crtc_handle_vblank(acrtc);
/**
* Following stuff must happen at start of vblank, for crc
@@ -675,7 +713,14 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
- DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ else
+ DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ notify->type, link_index);
+
hpd_aconnector = aconnector;
break;
}
@@ -1488,9 +1533,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dc_callback_init init_params;
-#endif
int r;
adev->dm.ddev = adev_to_drm(adev);
@@ -1498,9 +1541,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
-#ifdef CONFIG_DRM_AMD_DC_HDCP
memset(&init_params, 0, sizeof(init_params));
-#endif
mutex_init(&adev->dm.dpia_aux_lock);
mutex_init(&adev->dm.dc_lock);
@@ -1726,7 +1767,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
@@ -1737,7 +1777,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_init_callbacks(adev->dm.dc, &init_params);
}
-#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
if (!adev->dm.secure_display_ctxs) {
@@ -1844,7 +1883,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.secure_display_ctxs = NULL;
}
#endif
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue) {
hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
adev->dm.hdcp_workqueue = NULL;
@@ -1852,7 +1890,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
if (adev->dm.dc)
dc_deinit_callbacks(adev->dm.dc);
-#endif
dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
@@ -2273,7 +2310,7 @@ static int dm_late_init(void *handle)
struct dc_link *edp_links[MAX_NUM_EDP];
int edp_num;
- get_edp_links(adev->dm.dc, edp_links, &edp_num);
+ dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
for (i = 0; i < edp_num; i++) {
if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
return -EINVAL;
@@ -2449,11 +2486,11 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
enable ? "enable" : "disable");
if (enable) {
- rc = dm_enable_vblank(&acrtc->base);
+ rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base);
if (rc)
DRM_WARN("Failed to enable vblank interrupts\n");
} else {
- dm_disable_vblank(&acrtc->base);
+ amdgpu_dm_crtc_disable_vblank(&acrtc->base);
}
}
@@ -2496,7 +2533,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
goto fail;
}
- res = dc_commit_state(dc, context);
+ res = dc_commit_streams(dc, context->streams, context->stream_count);
fail:
dc_release_state(context);
@@ -2682,10 +2719,13 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
bundle->surface_updates[m].surface->force_full_update =
true;
}
- dc_commit_updates_for_stream(
- dm->dc, bundle->surface_updates,
- dc_state->stream_status->plane_count,
- dc_state->streams[k], &bundle->stream_update, dc_state);
+
+ update_planes_and_stream_adapter(dm->dc,
+ UPDATE_TYPE_FULL,
+ dc_state->stream_status->plane_count,
+ dc_state->streams[k],
+ &bundle->stream_update,
+ bundle->surface_updates);
}
cleanup:
@@ -2755,7 +2795,7 @@ static int dm_resume(void *handle)
dc_enable_dmub_outbox(adev->dm.dc);
}
- WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2923,7 +2963,7 @@ const struct amdgpu_ip_block_version dm_ip_block =
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
- .get_format_info = amd_get_format_info,
+ .get_format_info = amdgpu_dm_plane_get_format_info,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -2974,8 +3014,14 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->aux_support = true;
luminance_range = &conn_base->display_info.luminance_range;
- caps->aux_min_input_signal = luminance_range->min_luminance;
- caps->aux_max_input_signal = luminance_range->max_luminance;
+
+ if (luminance_range->max_luminance) {
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ caps->aux_max_input_signal = luminance_range->max_luminance;
+ } else {
+ caps->aux_min_input_signal = 0;
+ caps->aux_max_input_signal = 512;
+ }
}
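The fallback above covers panels whose EDID reports a zero luminance range; 0 and 512 are the driver's assumed default bounds in that case. A minimal sketch of how a user brightness level would map into whichever AUX range was chosen (helper name and the linear mapping are illustrative, not the driver's exact code):

#include <linux/math64.h>

/* Illustrative only: scale a user brightness level into the AUX
 * input-signal range selected above (possibly the 0..512 fallback). */
static u32 scale_to_aux_range(u32 user, u32 user_max,
			      u32 aux_min, u32 aux_max)
{
	if (!user_max || aux_max <= aux_min)
		return aux_min;
	return aux_min + div_u64((u64)user * (aux_max - aux_min), user_max);
}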
void amdgpu_dm_update_connector_after_detect(
@@ -3111,11 +3157,9 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->edid = NULL;
kfree(aconnector->timing_requested);
aconnector->timing_requested = NULL;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-#endif
}
mutex_unlock(&dev->mode_config.mutex);
@@ -3132,9 +3176,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
-#endif
bool ret = false;
if (adev->dm.disable_hpd_irq)
@@ -3146,12 +3188,10 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
*/
mutex_lock(&aconnector->hpd_lock);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue) {
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
dm_con_state->update_hdcp = true;
}
-#endif
if (aconnector->fake_enable)
aconnector->fake_enable = false;
@@ -3398,12 +3438,10 @@ out:
}
}
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
if (adev->dm.hdcp_workqueue)
hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
}
-#endif
if (dc_link->type != dc_connection_mst_branch)
drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
@@ -4320,9 +4358,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
continue;
- if (!plane->blends_with_above || !plane->blends_with_below)
- continue;
-
if (!plane->pixel_format_support.argb8888)
continue;
@@ -4947,7 +4982,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
+ ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
plane_info->rotation, tiling_flags,
&plane_info->tiling_info,
&plane_info->plane_size,
@@ -4956,7 +4991,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
if (ret)
return ret;
- fill_blending_from_plane_state(
+ amdgpu_dm_plane_fill_blending_from_plane_state(
plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
&plane_info->global_alpha, &plane_info->global_alpha_value);
@@ -4975,7 +5010,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
int ret;
bool force_disable_dcc = false;
- ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
if (ret)
return ret;
@@ -5105,9 +5140,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
for (; flip_addrs->dirty_rect_count < num_clips; clips++)
fill_dc_dirty_rect(new_plane_state->plane,
- &dirty_rects[i], clips->x1,
- clips->y1, clips->x2 - clips->x1,
- clips->y2 - clips->y1,
+ &dirty_rects[flip_addrs->dirty_rect_count],
+ clips->x1, clips->y1,
+ clips->x2 - clips->x1, clips->y2 - clips->y1,
&flip_addrs->dirty_rect_count,
false);
return;
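The fix above indexes the destination by the running flip_addrs->dirty_rect_count instead of the stale loop variable i, so every filled rect lands in the next free slot. A minimal sketch of the corrected append pattern (hypothetical helper, not the driver's code):

#include <drm/drm_rect.h>

/* Append at the running counter, never at the loop index; otherwise
 * skipped clips leave holes or overwrite earlier entries. */
static void append_dirty_clips(struct drm_rect *dirty_rects, u32 *count,
			       const struct drm_rect *clips, u32 num_clips)
{
	u32 i;

	for (i = 0; i < num_clips; i++)
		dirty_rects[(*count)++] = clips[i];
}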
@@ -5753,7 +5788,6 @@ static bool is_freesync_video_mode(const struct drm_display_mode *mode,
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps)
@@ -5784,6 +5818,10 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
struct dc *dc = sink->ctx->dc;
struct dc_dsc_bw_range bw_range = {0};
struct dc_dsc_config dsc_cfg = {0};
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
verified_link_cap = dc_link_get_link_cap(stream->link);
link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
@@ -5806,8 +5844,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
if (bw_range.max_kbps < link_bw_in_kbps) {
if (dc_dsc_compute_config(dc->res_pool->dscs[0],
dsc_caps,
- dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
0,
&stream->timing,
&dsc_cfg)) {
@@ -5821,8 +5858,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
if (dc_dsc_compute_config(dc->res_pool->dscs[0],
dsc_caps,
- dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
link_bw_in_kbps,
&stream->timing,
&dsc_cfg)) {
@@ -5843,6 +5879,10 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
u32 dsc_max_supported_bw_in_kbps;
u32 max_dsc_target_bpp_limit_override =
drm_connector->display_info.max_dsc_bpp;
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
@@ -5861,8 +5901,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
- aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
@@ -5879,8 +5918,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dsc_max_supported_bw_in_kbps > 0)
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
- aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
dsc_max_supported_bw_in_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
@@ -5904,7 +5942,6 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
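Both DSC hunks above follow one refactor: dc_dsc_compute_config() now takes a dc_dsc_config_options struct in place of the two positional limit arguments, and bpp limits travel in 1/16-bpp fixed point, so a 16-bpp cap becomes 256. A condensed sketch of the new calling convention (names taken from the hunks above; error handling elided):

struct dc_dsc_config_options dsc_options = {0};

/* start from dc's defaults, then override only the bpp ceiling */
dc_dsc_get_default_config_option(dc, &dsc_options);
dsc_options.max_target_bpp_limit_override_x16 =
	max_dsc_target_bpp_limit_override * 16;	/* 1/16-bpp units */

if (dc_dsc_compute_config(dc->res_pool->dscs[0], dsc_caps, &dsc_options,
			  link_bw_in_kbps, &stream->timing,
			  &stream->timing.dsc_cfg))
	stream->timing.flags.DSC = 1;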
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
@@ -5927,9 +5964,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
int mode_refresh;
int preferred_refresh = 0;
enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_dec_dpcd_caps dsc_caps;
-#endif
struct dc_sink *sink = NULL;
@@ -6028,12 +6063,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream->timing = *aconnector->timing_requested;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* SST DSC determination policy */
update_dsc_caps(aconnector, sink, stream, &dsc_caps);
if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
-#endif
update_stream_scaling_settings(&mode, dm_state, stream);
@@ -6759,7 +6792,6 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
.atomic_check = dm_encoder_helper_atomic_check
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars)
@@ -6833,7 +6865,6 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
}
return 0;
}
-#endif
static int to_drm_connector_type(enum signal_type st)
{
@@ -7158,12 +7189,18 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
struct drm_encoder *encoder;
struct edid *edid = amdgpu_dm_connector->edid;
+ struct dc_link_settings *verified_link_cap =
+ &amdgpu_dm_connector->dc_link->verified_link_cap;
+ const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
encoder = amdgpu_dm_connector_to_encoder(connector);
if (!drm_edid_is_valid(edid)) {
amdgpu_dm_connector->num_modes =
drm_add_modes_noedid(connector, 640, 480);
+ if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
+ amdgpu_dm_connector->num_modes +=
+ drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
amdgpu_dm_connector_add_common_modes(encoder, connector);
@@ -7262,10 +7299,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (!aconnector->mst_root)
drm_connector_attach_vrr_capable_property(&aconnector->base);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
-#endif
}
}
@@ -7527,7 +7562,6 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
return false;
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
struct drm_crtc_state *old_crtc_state,
struct drm_connector_state *new_conn_state,
@@ -7647,7 +7681,6 @@ static bool is_content_protection_different(struct drm_crtc_state *new_crtc_stat
pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
return false;
}
-#endif
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
@@ -7716,7 +7749,7 @@ static void update_freesync_state_on_stream(
&vrr_params);
if (adev->family < AMDGPU_FAMILY_AI &&
- amdgpu_dm_vrr_active(new_crtc_state)) {
+ amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
mod_freesync_handle_v_update(dm->freesync_module,
new_stream, &vrr_params);
@@ -7834,8 +7867,8 @@ static void update_stream_irq_parameters(
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
- bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
- bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
+ bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
+ bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
if (!old_vrr_active && new_vrr_active) {
/* Transition VRR inactive -> active:
@@ -7846,7 +7879,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
* We also need vupdate irq for the actual core vblank handling
* at end of vblank.
*/
- WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -7854,7 +7887,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
/* Transition VRR active -> inactive:
* Allow vblank irq disable again for fixed refresh rate.
*/
- WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -7873,7 +7906,7 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
*/
for_each_old_plane_in_state(state, plane, old_plane_state, i)
if (plane->type == DRM_PLANE_TYPE_CURSOR)
- handle_cursor_update(plane, old_plane_state);
+ amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
@@ -7896,7 +7929,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
int planes_count = 0, vpos, hpos;
unsigned long flags;
u32 target_vblank, last_flip_vblank;
- bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
bool cursor_update = false;
bool pflip_present = false;
bool dirty_rects_changed = false;
@@ -7958,7 +7991,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
}
- fill_dc_scaling_info(dm->adev, new_plane_state,
+ amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
&bundle->scaling_infos[planes_count]);
bundle->surface_updates[planes_count].scaling_info =
@@ -8178,12 +8211,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
- dc_commit_updates_for_stream(dm->dc,
- bundle->surface_updates,
- planes_count,
- acrtc_state->stream,
- &bundle->stream_update,
- dc_state);
+ update_planes_and_stream_adapter(dm->dc,
+ acrtc_state->update_type,
+ planes_count,
+ acrtc_state->stream,
+ &bundle->stream_update,
+ bundle->surface_updates);
/**
* Enable or disable the interrupts on the backend.
@@ -8446,7 +8479,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* aconnector as needed
*/
- if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
@@ -8501,7 +8534,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
- WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
/* Allow idle optimization when vblank count is 0 for display off */
if (dm->active_vblank_irq_count == 0)
@@ -8527,7 +8560,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
@@ -8638,7 +8670,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->hdcp_content_type, enable_encryption);
}
}
-#endif
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -8715,12 +8746,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_lock(&dm->dc_lock);
- dc_commit_updates_for_stream(dm->dc,
- dummy_updates,
- status->plane_count,
- dm_new_crtc_state->stream,
- &stream_update,
- dc_state);
+ dc_update_planes_and_stream(dm->dc,
+ dummy_updates,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ &stream_update);
mutex_unlock(&dm->dc_lock);
}
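One easy-to-miss detail in the replacement above: dc_update_planes_and_stream() is still handed status->plane_count surface-update slots even though only stream properties changed. A short note, sketched from the call site (the dummy_updates array is declared earlier in this function):

/*
 * Stream-only update:
 *
 *   dummy_updates[] - zeroed, one slot per currently active plane,
 *                     telling dc to leave the existing surfaces alone
 *   stream_update   - carries the actual property changes
 *
 * Unlike the old dc_commit_updates_for_stream(), no dc_state argument
 * is passed; dc works against its current context internally.
 */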
@@ -9274,7 +9304,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (modereset_required(new_crtc_state))
goto skip_modeset;
- if (modeset_required(new_crtc_state, new_stream,
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
dm_old_crtc_state->stream)) {
WARN_ON(dm_new_crtc_state->stream);
@@ -9625,7 +9655,7 @@ static int dm_update_plane_state(struct dc *dc,
if (!needs_reset)
return 0;
- ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
return ret;
@@ -9771,7 +9801,6 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
return 0;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_connector *connector;
@@ -9797,7 +9826,6 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
}
-#endif
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
@@ -9841,11 +9869,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
bool lock_and_validation_needed = false;
bool is_top_most_overlay = true;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_topology_state *mst_state;
struct dsc_mst_fairness_vars vars[MAX_PIPES];
-#endif
trace_amdgpu_dm_atomic_check_begin(state);
@@ -9876,7 +9902,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state->connectors_changed = true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc_resource_is_dsc_encoding_supported(dc)) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
@@ -9888,7 +9913,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
}
-#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -10026,13 +10050,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc_resource_is_dsc_encoding_supported(dc)) {
ret = pre_validate_dsc(state, &dm_state, vars);
if (ret != 0)
goto fail;
}
-#endif
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
@@ -10098,7 +10120,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* set the slot info for each mst_state based on the link encoding format */
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
struct amdgpu_dm_connector *aconnector;
@@ -10118,7 +10139,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
drm_connector_list_iter_end(&iter);
}
-#endif
/**
* Streams and planes are reset when there are changes that affect
@@ -10146,7 +10166,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
@@ -10158,7 +10177,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
goto fail;
}
-#endif
/*
* Perform validation of MST topology in the state:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index ed5cbe9da40c..904f9e2fd35b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -461,9 +461,7 @@ struct amdgpu_display_manager {
struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];
struct mod_freesync *freesync_module;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct hdcp_workqueue *hdcp_workqueue;
-#endif
/**
* @vblank_control_workqueue:
@@ -747,9 +745,7 @@ struct dm_connector_state {
uint8_t underscan_hborder;
bool underscan_enable;
bool freesync_capable;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
bool update_hdcp;
-#endif
uint8_t abm_level;
int vcpi_slots;
uint64_t pbn;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index dc4f37240beb..1d924dc51a3e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -34,7 +34,7 @@
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_debugfs.h"
-void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
+void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
struct drm_crtc *crtc = &acrtc->base;
struct drm_device *dev = crtc->dev;
@@ -54,14 +54,14 @@ void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-bool modeset_required(struct drm_crtc_state *crtc_state,
+bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state,
struct dc_stream_state *new_stream,
struct dc_stream_state *old_stream)
{
return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}
-bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
+bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
return acrtc->dm_irq_params.freesync_config.state ==
@@ -70,7 +70,7 @@ bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
VRR_STATE_ACTIVE_FIXED;
}
-int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
+int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -89,7 +89,7 @@ int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -159,11 +159,11 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
if (enable) {
/* vblank irq on -> Only need vupdate irq in vrr mode */
- if (amdgpu_dm_vrr_active(acrtc_state))
- rc = dm_set_vupdate_irq(crtc, true);
+ if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
} else {
/* vblank irq off -> vupdate irq off */
- rc = dm_set_vupdate_irq(crtc, false);
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
}
if (rc)
@@ -199,12 +199,12 @@ skip:
return 0;
}
-int dm_enable_vblank(struct drm_crtc *crtc)
+int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc)
{
return dm_set_vblank(crtc, true);
}
-void dm_disable_vblank(struct drm_crtc *crtc)
+void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc)
{
dm_set_vblank(crtc, false);
}
@@ -300,8 +300,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
.get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = dm_enable_vblank,
- .disable_vblank = dm_disable_vblank,
+ .enable_vblank = amdgpu_dm_crtc_enable_vblank,
+ .disable_vblank = amdgpu_dm_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DEBUG_FS)
.late_register = amdgpu_dm_crtc_late_register,
@@ -381,7 +381,7 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
dm_update_crtc_active_planes(crtc, crtc_state);
if (WARN_ON(unlikely(!dm_crtc_state->stream &&
- modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
+ amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 1ac8692354cf..17e948753f59 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -27,21 +27,21 @@
#ifndef __AMDGPU_DM_CRTC_H__
#define __AMDGPU_DM_CRTC_H__
-void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc);
+void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc);
-bool modeset_required(struct drm_crtc_state *crtc_state,
+bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state,
struct dc_stream_state *new_stream,
struct dc_stream_state *old_stream);
-int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
+int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
-bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc);
+bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
-int dm_enable_vblank(struct drm_crtc *crtc);
+int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
-void dm_disable_vblank(struct drm_crtc *crtc);
+void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc);
int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 09a3efa517da..827fcb4fb3b3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -724,7 +724,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i];
- dc_link_set_test_pattern(
+ dc_link_dp_set_test_pattern(
link,
test_pattern,
DP_TEST_PATTERN_COLOR_SPACE_RGB,
@@ -947,7 +947,6 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
return 0;
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
* Returns the HDCP capability of the Display (1.4 for now).
*
@@ -984,7 +983,6 @@ static int hdcp_sink_capability_show(struct seq_file *m, void *data)
return 0;
}
-#endif
/*
* Returns whether the connected display is internal and not hotpluggable.
@@ -2593,9 +2591,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
-#endif
DEFINE_SHOW_ATTRIBUTE(internal_display);
DEFINE_SHOW_ATTRIBUTE(psr_capability);
DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
@@ -2726,9 +2722,7 @@ static const struct {
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"lttpr_status", &dp_lttpr_status_fops},
{"test_pattern", &dp_phy_test_pattern_fops},
-#ifdef CONFIG_DRM_AMD_DC_HDCP
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
-#endif
{"sdp_message", &sdp_message_fops},
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
{"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
@@ -2749,14 +2743,13 @@ static const struct {
{"is_dpia_link", &is_dpia_link_fops}
};
-#ifdef CONFIG_DRM_AMD_DC_HDCP
static const struct {
char *name;
const struct file_operations *fops;
} hdmi_debugfs_entries[] = {
{"hdcp_sink_capability", &hdcp_sink_capability_fops}
};
-#endif
+
/*
* Force YUV420 output if available from the given mode
*/
@@ -2801,6 +2794,22 @@ static int psr_get(void *data, u64 *val)
}
/*
+ * Read PSR state residency
+ */
+static int psr_read_residency(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ u32 residency;
+
+ link->dc->link_srv->edp_get_psr_residency(link, &residency);
+
+ *val = (u64)residency;
+
+ return 0;
+}
+
+/*
* Set dmcub trace event IRQ enable or disable.
* Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en
* Usage to disable dmcub trace event IRQ: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en
@@ -2835,6 +2844,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g
dmcub_trace_event_state_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
+ "%llu\n");
DEFINE_SHOW_ATTRIBUTE(current_backlight);
DEFINE_SHOW_ATTRIBUTE(target_backlight);
@@ -2998,6 +3009,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
+ debugfs_create_file_unsafe("psr_residency", 0444, dir,
+ connector, &psr_residency_fops);
debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector,
&current_backlight_fops);
debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector,
@@ -3015,7 +3028,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
debugfs_create_file(hdmi_debugfs_entries[i].name,
@@ -3023,7 +3035,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
hdmi_debugfs_entries[i].fops);
}
}
-#endif
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
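For the psr_residency node registered in connector_debugfs_init() above, a hypothetical userspace session (card index and connector name are assumptions):

/*
 *   # cat /sys/kernel/debug/dri/0/eDP-1/psr_residency
 *   93
 *
 * The node is read-only (0444) and backed by psr_read_residency(),
 * which forwards to link_srv->edp_get_psr_residency(); the unit of
 * the reported counter is firmware-defined.
 */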
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 8e572f07ec47..5536d17306d0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -559,9 +559,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.assr_enabled = config->assr_enabled;
link->dp.mst_enabled = config->mst_enabled;
+ link->dp.dp2_enabled = config->dp2_enabled;
link->dp.usb4_enabled = config->usb4_enabled;
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
- link->adjust.auth_delay = 0;
+ link->adjust.auth_delay = 2;
link->adjust.hdcp1.disable = 0;
conn_state = aconnector->base.state;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 1583157da355..9c1e91c2179e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -177,6 +177,40 @@ void dm_helpers_dp_update_branch_info(
const struct dc_link *link)
{}
+static void dm_helpers_construct_old_payload(
+ struct dc_link *link,
+ int pbn_per_slot,
+ struct drm_dp_mst_atomic_payload *new_payload,
+ struct drm_dp_mst_atomic_payload *old_payload)
+{
+ struct link_mst_stream_allocation_table current_link_table =
+ link->mst_stream_alloc_table;
+ struct link_mst_stream_allocation *dc_alloc;
+ int i;
+
+ *old_payload = *new_payload;
+
+ /* Set the correct time_slots/PBN of the old payload.
+ * The other fields (delete & dsc_enabled) in
+ * struct drm_dp_mst_atomic_payload are don't-care
+ * when calling drm_dp_remove_payload().
+ */
+ for (i = 0; i < current_link_table.stream_count; i++) {
+ dc_alloc =
+ &current_link_table.stream_allocations[i];
+
+ if (dc_alloc->vcp_id == new_payload->vcpi) {
+ old_payload->time_slots = dc_alloc->slot_count;
+ old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
+ break;
+ }
+ }
+
+ /* Make sure there is an old payload. */
+ ASSERT(i != current_link_table.stream_count);
+}
+
/*
* Writes payload allocation table in immediate downstream device.
*/
@@ -188,7 +222,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
{
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_mst_atomic_payload *payload;
+ struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
struct drm_dp_mst_topology_mgr *mst_mgr;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
@@ -204,17 +238,26 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
/* It's OK for this to fail */
- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
- if (enable)
- drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
- else
- drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
+ new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
+ if (enable) {
+ target_payload = new_payload;
+
+ drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
+ } else {
+ /* Construct the old payload by VCPI. */
+ dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
+ new_payload, &old_payload);
+ target_payload = &old_payload;
+
+ drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
+ }
/* mst_mgr->payloads are the VC payloads used to notify the MST branch
 * via DPCD or AUX message. Slots 1-63 are allocated in sequence for
 * each stream, and the AMD ASIC stream slot allocation should follow
 * the same sequence, so copy the DRM MST allocation to dc. */
- fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
+ fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
return true;
}
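The enable and disable paths above are now asymmetric, which the hunk makes easy to miss. A compact summary in comment form (mirrors the code above):

/*
 * enable:  target_payload = new_payload from the DRM atomic MST state;
 *          drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
 *
 * disable: the atomic state no longer holds the old slot count, so it
 *          is rebuilt from dc's link->mst_stream_alloc_table by VCPI
 *          (dm_helpers_construct_old_payload()), then
 *          drm_dp_remove_payload(mst_mgr, mst_state, &old_payload,
 *                                new_payload);
 *          target_payload = &old_payload;
 *
 * Either way, target_payload is what fill_dc_mst_payload_table_from_drm()
 * copies into dc's allocation table.
 */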
@@ -468,8 +511,8 @@ bool dm_helpers_dp_read_dpcd(
return false;
}
- return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
- data, size) > 0;
+ return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
+ size) == size;
}
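For the dm_helpers_dp_read_dpcd() change just above: drm_dp_dpcd_read() returns the number of bytes actually transferred, or a negative errno, so the old "> 0" test accepted short reads and left the tail of the buffer stale. A minimal sketch of the stricter pattern:

ssize_t ret = drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
			       data, size);

/* a partial transfer is now as fatal as an AUX error */
return ret == size;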
bool dm_helpers_dp_write_dpcd(
@@ -525,7 +568,6 @@ bool dm_helpers_submit_i2c(
return result;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
bool is_write_cmd,
unsigned char cmd,
@@ -693,7 +735,6 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
return ret;
}
-#endif
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
@@ -719,13 +760,11 @@ bool dm_helpers_dp_write_dsc_enable(
if (!aconnector->dsc_aux)
return false;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
// apply w/a to synaptics
if (needs_dsc_aux_workaround(aconnector->dc_link) &&
(aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
aconnector->dsc_aux, stream, enable_dsc);
-#endif
port = aconnector->mst_output_port;
@@ -763,17 +802,13 @@ bool dm_helpers_dp_write_dsc_enable(
}
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
-#endif
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
-#if defined(CONFIG_DRM_AMD_DC_DCN)
} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
}
-#endif
}
return ret;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index e25e1b2bf194..6378352346c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -31,10 +31,7 @@
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"
-
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
-#endif
#include "dc.h"
#include "dm_helpers.h"
@@ -201,7 +198,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
bool needs_dsc_aux_workaround(struct dc_link *link)
{
if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
@@ -212,6 +208,21 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
return false;
}
+bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
+{
+ u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
+
+ if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
+ DRM_INFO("Synaptics Cascaded MST hub\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -235,6 +246,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
needs_dsc_aux_workaround(aconnector->dc_link))
aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
+ /* synaptics cascaded MST hub case */
+ if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+ aconnector->dsc_aux = port->mgr->aux;
+
if (!aconnector->dsc_aux)
return false;
@@ -271,7 +286,6 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect
return true;
}
-#endif
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
@@ -362,7 +376,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
* plugged back with same display index, its hdcp properties
* will be retrieved from hdcp_work within dm_dp_mst_get_modes
*/
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (aconnector->dc_sink && connector->state) {
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -374,13 +387,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
connector->state->content_protection =
hdcp_w->content_protection[connector->index];
}
-#endif
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!validate_dsc_caps_on_connector(aconnector))
memset(&aconnector->dc_sink->dsc_caps,
0, sizeof(aconnector->dc_sink->dsc_caps));
@@ -388,7 +399,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!retrieve_downstream_port_device(aconnector))
memset(&aconnector->mst_downstream_port_present,
0, sizeof(aconnector->mst_downstream_port_present));
-#endif
}
}
@@ -647,8 +657,6 @@ int dm_mst_get_pbn_divider(struct dc_link *link)
dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
struct dsc_mst_fairness_params {
struct dc_crtc_timing *timing;
struct dc_sink *sink;
@@ -662,12 +670,25 @@ struct dsc_mst_fairness_params {
struct amdgpu_dm_connector *aconnector;
};
-static int kbps_to_peak_pbn(int kbps)
+static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+{
+ u8 link_coding_cap;
+ uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
+ if (link_coding_cap == DP_128b_132b_ENCODING)
+ fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+
+ return fec_overhead_multiplier_x1000;
+}
+
+static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
u64 peak_kbps = kbps;
peak_kbps *= 1006;
- peak_kbps = div_u64(peak_kbps, 1000);
+ peak_kbps *= fec_overhead_multiplier_x1000;
+ peak_kbps = div_u64(peak_kbps, 1000 * 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
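A worked instance of the revised fixed-point math above (values illustrative), handy for sanity-checking the two multipliers:

/*
 * kbps = 1,000,000:
 *
 *   8b/10b    (multiplier 1031):
 *     peak_kbps = 1,000,000 * 1006 * 1031 / 1,000,000 = 1,037,186
 *     pbn       = DIV64_U64_ROUND_UP(1,037,186 * 64, 54 * 8 * 1000)
 *               = ceil(66,379,904 / 432,000) = 154
 *
 *   128b/132b (multiplier 1000):
 *     peak_kbps = 1,006,000
 *     pbn       = ceil(64,384,000 / 432,000) = 150
 *
 * 1006/1000 is the usual 0.6% downspread margin; the x1000 multiplier
 * folds the link coding's FEC overhead into the same fixed-point step
 * (1031 for 8b/10b; 1000, i.e. none, for 128b/132b where FEC is
 * inherent to the encoding).
 */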
@@ -678,16 +699,19 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
{
struct drm_connector *drm_connector;
int i;
+ struct dc_dsc_config_options dsc_options = {0};
for (i = 0; i < count; i++) {
drm_connector = &params[i].aconnector->base;
+ dc_dsc_get_default_config_option(params[i].sink->ctx->dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
+
memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
params[i].sink->ctx->dc->res_pool->dscs[0],
&params[i].sink->dsc_caps.dsc_dec_caps,
- params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
- drm_connector->display_info.max_dsc_bpp,
+ &dsc_options,
0,
params[i].timing,
&params[i].timing->dsc_cfg)) {
@@ -730,15 +754,16 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
u64 kbps;
struct drm_connector *drm_connector = &param.aconnector->base;
- uint32_t max_dsc_target_bpp_limit_override =
- drm_connector->display_info.max_dsc_bpp;
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
- param.sink->ctx->dc->debug.dsc_min_slice_height_override,
- max_dsc_target_bpp_limit_override,
+ &dsc_options,
(int) kbps, param.timing, &dsc_config);
return dsc_config.bits_per_pixel;
@@ -761,11 +786,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
+ kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -861,6 +887,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@@ -890,7 +917,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -903,7 +930,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -932,6 +959,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
+ uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params));
@@ -993,7 +1021,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1012,7 +1040,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1020,7 +1048,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1154,6 +1182,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
bool computed_streams[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct resource_pool *res_pool;
int link_vars_start_index = 0;
int ret = 0;
@@ -1162,6 +1191,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
for (i = 0; i < dc_state->stream_count; i++) {
stream = dc_state->streams[i];
+ res_pool = stream->ctx->dc->res_pool;
if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
continue;
@@ -1177,7 +1207,8 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
- if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+ if (!res_pool->funcs->remove_stream_from_ctx ||
+ res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return -EINVAL;
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
@@ -1435,14 +1466,12 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
int bpp, pbn, branch_max_throughput_mps = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_link_settings cur_link_settings;
unsigned int end_to_end_bw_in_kbps = 0;
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
@@ -1484,16 +1513,13 @@ enum dc_status dm_dp_mst_is_port_support_mode(
return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
-#endif
/* check if mode could be supported within full_pbn */
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
if (pbn > aconnector->mst_output_port->full_pbn)
return DC_FAIL_BANDWIDTH_VALIDATE;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
}
-#endif
/* check is mst dsc output bandwidth branch_overall_throughput_0_mps */
switch (stream->timing.pixel_encoding) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 97fd70df531b..1e4ede1e57ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -34,6 +34,21 @@
#define SYNAPTICS_RC_OFFSET 0x4BC
#define SYNAPTICS_RC_DATA 0x4C0
+#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
+
+/*
+ * Panamera MST hub detection
+ * DPCD offset 050Eh == 0x5A indicates a cascaded MST hub case.
+ * Check from the beginning of the branch device vendor specific field (050Ch).
+ */
+#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0)
+#define BRANCH_HW_REVISION_PANAMERA_A2 0x10
+#define SYNAPTICS_CASCADED_HUB_ID 0x5A
+#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
+
+#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
+#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
+
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
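The macros above index into a 4-byte vendor-specific read, so the byte-to-offset mapping is worth spelling out (mirrors is_synaptics_cascaded_panamera() in amdgpu_dm_mst_types.c):

/*
 * u8 data[4];
 * drm_dp_dpcd_read(aux, DP_BRANCH_VENDOR_SPECIFIC_START, data, 4);
 *
 *   data[0] -> DPCD 0x50C
 *   data[1] -> DPCD 0x50D
 *   data[2] -> DPCD 0x50E  == SYNAPTICS_CASCADED_HUB_ID (0x5A) on a
 *                             cascaded Panamera hub
 *   data[3] -> DPCD 0x50F
 *
 * IS_SYNAPTICS_PANAMERA() additionally requires the high nibble of
 * branch device name byte 4 to be 0x5 before data[2] is trusted.
 */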
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 28fb1f02591a..322668973747 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -90,12 +90,12 @@ enum dm_micro_swizzle {
MICRO_SWIZZLE_R = 3
};
-const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
-void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
+void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
bool *global_alpha, int *global_alpha_value)
{
@@ -741,25 +741,7 @@ static int get_plane_formats(const struct drm_plane *plane,
return num_formats;
}
-#ifdef CONFIG_DRM_AMD_DC_HDR
-static int attach_color_mgmt_properties(struct amdgpu_display_manager *dm, struct drm_plane *plane)
-{
- drm_object_attach_property(&plane->base,
- dm->degamma_lut_property,
- 0);
- drm_object_attach_property(&plane->base,
- dm->degamma_lut_size_property,
- MAX_COLOR_LUT_ENTRIES);
- drm_object_attach_property(&plane->base, dm->ctm_property,
- 0);
- drm_object_attach_property(&plane->base, dm->sdr_boost_property,
- DEFAULT_SDR_BOOST);
-
- return 0;
-}
-#endif
-
-int fill_plane_buffer_attributes(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
@@ -918,7 +900,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_new->dc_state;
bool force_disable_dcc = !plane_state->dcc.enable;
- fill_plane_buffer_attributes(
+ amdgpu_dm_plane_fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
afb->tiling_flags,
&plane_state->tiling_info, &plane_state->plane_size,
@@ -999,7 +981,7 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev,
*min_downscale = 1000;
}
-int dm_plane_helper_check_state(struct drm_plane_state *state,
+int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state)
{
struct drm_framebuffer *fb = state->fb;
@@ -1053,7 +1035,7 @@ int dm_plane_helper_check_state(struct drm_plane_state *state,
state, new_crtc_state, min_scale, max_scale, true, true);
}
-int fill_dc_scaling_info(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info)
{
@@ -1161,11 +1143,11 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
if (!new_crtc_state)
return -EINVAL;
- ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
return ret;
- ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
if (ret)
return ret;
@@ -1229,7 +1211,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
return 0;
}
-void handle_cursor_update(struct drm_plane *plane,
+void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state)
{
struct amdgpu_device *adev = drm_to_adev(plane->dev);
@@ -1314,7 +1296,7 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane,
plane->state->crtc_w = new_state->crtc_w;
plane->state->crtc_h = new_state->crtc_h;
- handle_cursor_update(plane, old_state);
+ amdgpu_dm_plane_handle_cursor_update(plane, old_state);
}
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
@@ -1337,10 +1319,6 @@ static void dm_drm_plane_reset(struct drm_plane *plane)
if (amdgpu_state)
__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
-#ifdef CONFIG_DRM_AMD_DC_HDR
- if (amdgpu_state)
- amdgpu_state->sdr_boost = DEFAULT_SDR_BOOST;
-#endif
}
static struct drm_plane_state *
@@ -1360,15 +1338,6 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane)
dc_plane_state_retain(dm_plane_state->dc_state);
}
-#ifdef CONFIG_DRM_AMD_DC_HDR
- if (dm_plane_state->degamma_lut)
- drm_property_blob_get(dm_plane_state->degamma_lut);
- if (dm_plane_state->ctm)
- drm_property_blob_get(dm_plane_state->ctm);
-
- dm_plane_state->sdr_boost = old_dm_plane_state->sdr_boost;
-#endif
-
return &dm_plane_state->base;
}
@@ -1436,103 +1405,12 @@ static void dm_drm_plane_destroy_state(struct drm_plane *plane,
{
struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
-#ifdef CONFIG_DRM_AMD_DC_HDR
- drm_property_blob_put(dm_plane_state->degamma_lut);
- drm_property_blob_put(dm_plane_state->ctm);
-#endif
if (dm_plane_state->dc_state)
dc_plane_state_release(dm_plane_state->dc_state);
drm_atomic_helper_plane_destroy_state(plane, state);
}
-#ifdef CONFIG_DRM_AMD_DC_HDR
-/* copied from drm_atomic_uapi.c */
-static int atomic_replace_property_blob_from_id(struct drm_device *dev,
- struct drm_property_blob **blob,
- uint64_t blob_id,
- ssize_t expected_size,
- ssize_t expected_elem_size,
- bool *replaced)
-{
- struct drm_property_blob *new_blob = NULL;
-
- if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
- if (new_blob == NULL)
- return -EINVAL;
-
- if (expected_size > 0 &&
- new_blob->length != expected_size) {
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- if (expected_elem_size > 0 &&
- new_blob->length % expected_elem_size != 0) {
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- }
-
- *replaced |= drm_property_replace_blob(blob, new_blob);
- drm_property_blob_put(new_blob);
-
- return 0;
-}
-
-int dm_drm_plane_set_property(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t val)
-{
- struct amdgpu_device *adev = drm_to_adev(plane->dev);
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
- int ret = 0;
- bool replaced;
-
- if (property == adev->dm.degamma_lut_property) {
- ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
- &dm_plane_state->degamma_lut,
- val, -1, sizeof(struct drm_color_lut),
- &replaced);
- } else if (property == adev->dm.ctm_property) {
- ret = atomic_replace_property_blob_from_id(adev_to_drm(adev),
- &dm_plane_state->ctm,
- val,
- sizeof(struct drm_color_ctm), -1,
- &replaced);
- } else if (property == adev->dm.sdr_boost_property) {
- dm_plane_state->sdr_boost = val;
- } else {
- return -EINVAL;
- }
-
- return ret;
-}
-
-int dm_drm_plane_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
- struct amdgpu_device *adev = drm_to_adev(plane->dev);
-
- if (property == adev->dm.degamma_lut_property) {
- *val = (dm_plane_state->degamma_lut) ?
- dm_plane_state->degamma_lut->base.id : 0;
- } else if (property == adev->dm.ctm_property) {
- *val = (dm_plane_state->ctm) ? dm_plane_state->ctm->base.id : 0;
- } else if (property == adev->dm.sdr_boost_property) {
- *val = dm_plane_state->sdr_boost;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-#endif
-
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -1541,10 +1419,6 @@ static const struct drm_plane_funcs dm_plane_funcs = {
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
.format_mod_supported = dm_plane_format_mod_supported,
-#ifdef CONFIG_DRM_AMD_DC_HDR
- .atomic_set_property = dm_drm_plane_set_property,
- .atomic_get_property = dm_drm_plane_get_property,
-#endif
};
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
@@ -1615,9 +1489,6 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
-#ifdef CONFIG_DRM_AMD_DC_HDR
- attach_color_mgmt_properties(dm, plane);
-#endif
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
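
A note on the HDR color-management code removed above: the deleted
atomic_replace_property_blob_from_id() helper (itself copied from
drm_atomic_uapi.c) validates a blob's total size and element size before
swapping it into the plane state. A minimal userspace model of that
validation logic, using stand-in types rather than the real DRM structures:

    #include <stddef.h>
    #include <stdio.h>

    struct blob { size_t length; };

    /* Model of the removed helper's checks: reject a blob whose total size
     * or element size does not match what the property expects. A negative
     * expected value means "don't check", as in the original. */
    static int validate_blob(const struct blob *b, long expected_size,
                             long expected_elem_size)
    {
        if (expected_size > 0 && b->length != (size_t)expected_size)
            return -1;
        if (expected_elem_size > 0 &&
            b->length % (size_t)expected_elem_size != 0)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct blob lut = { 256 * 8 }; /* e.g. 256 entries of 8 bytes each */
        /* element-size check only, as in the degamma LUT case above */
        printf("%d\n", validate_blob(&lut, -1, 8)); /* 0: accepted */
        return 0;
    }
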
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index a4bee8528a51..930f1572f898 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -29,17 +29,17 @@
#include "dc.h"
-void handle_cursor_update(struct drm_plane *plane,
+void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
-int fill_dc_scaling_info(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info);
-int dm_plane_helper_check_state(struct drm_plane_state *state,
+int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
struct drm_crtc_state *new_crtc_state);
-int fill_plane_buffer_attributes(struct amdgpu_device *adev,
+int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
@@ -56,9 +56,9 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
-const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
-void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
+void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
bool *global_alpha, int *global_alpha_value);
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 94f156d57220..69ffd4424dc7 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -22,14 +22,13 @@
#
# Makefile for Display Core (dc) component.
-DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual
+DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual dsc
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
KCOV_INSTRUMENT := n
DC_LIBS += dcn20
-DC_LIBS += dsc
DC_LIBS += dcn10
DC_LIBS += dcn21
DC_LIBS += dcn201
@@ -56,9 +55,7 @@ ifdef CONFIG_DRM_AMD_DC_SI
DC_LIBS += dce60
endif
-ifdef CONFIG_DRM_AMD_DC_HDCP
DC_LIBS += hdcp
-endif
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index e381de2429fa..f0f948501e9a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2064,7 +2064,7 @@ static enum bp_result bios_parser_get_encoder_cap_info(
if (!info)
return BP_RESULT_BADINPUT;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
/* encoder cap record not available in v1_5 */
if (bp->object_info_tbl.revision.minor == 5)
return BP_RESULT_NORECORD;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 271d8e573181..ad390e4cd0a9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -74,7 +74,7 @@ CLK_MGR_DCE120 = dce120_clk_mgr.o
AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120)
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN10
###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 69691daf4dbb..6127d6045336 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -104,7 +104,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
int edp_num;
unsigned int panel_inst;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (dc->hwss.exit_optimized_pwr_state)
dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
@@ -116,7 +116,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
- dc_link_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
+ dc->link_srv->edp_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
}
}
@@ -129,13 +129,13 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
int edp_num;
unsigned int panel_inst;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num) {
for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
edp_link = edp_links[panel_inst];
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
- dc_link_set_psr_allow_active(edp_link,
+ dc->link_srv->edp_set_psr_allow_active(edp_link,
&clk_mgr->psr_allow_active_cache, false, false, NULL);
}
}
@@ -221,7 +221,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dce120_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
case FAMILY_RV: {
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
@@ -351,7 +351,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
break;
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP */
default:
ASSERT(0); /* Unknown Asic */
break;
@@ -364,7 +364,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
switch (clk_mgr_base->ctx->asic_id.chip_family) {
case FAMILY_NV:
if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
@@ -405,7 +405,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
default:
break;
}
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP */
kfree(clk_mgr);
}
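
Both hunks above only reroute the PSR calls through dc->link_srv, but the
surrounding logic is a cache-and-restore pattern worth making explicit:
clk_mgr_exit_optimized_pwr_state() saves each eDP link's psr_allow_active
and forces PSR off, and clk_mgr_optimize_pwr_state() restores the cached
value afterwards. A toy model of that pattern with simplified stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    struct edp_link { bool psr_feature_enabled; bool psr_allow_active; };

    /* Before the power transition: remember the PSR state, force it off. */
    static void exit_optimized_pwr_state(struct edp_link *l, bool *cache)
    {
        if (!l->psr_feature_enabled)
            return;
        *cache = l->psr_allow_active;
        l->psr_allow_active = false;
    }

    /* After the transition: restore whatever state was cached. */
    static void optimize_pwr_state(struct edp_link *l, const bool *cache)
    {
        if (l->psr_feature_enabled)
            l->psr_allow_active = *cache;
    }

    int main(void)
    {
        struct edp_link l = { true, true };
        bool cache = false;

        exit_optimized_pwr_state(&l, &cache);
        optimize_pwr_state(&l, &cache);
        printf("%d\n", l.psr_allow_active); /* 1: restored */
        return 0;
    }
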
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index f0577dcd1af6..811720749faf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -162,7 +162,7 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
-struct clk_mgr_funcs dcn201_funcs = {
+static struct clk_mgr_funcs dcn201_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn201_update_clocks,
.init_clocks = dcn201_init_clocks,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index ca6dfd2d7561..bd9fd0b54f46 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -706,7 +706,7 @@ void rn_clk_mgr_construct(
enum pp_smu_status status = 0;
int is_green_sardine = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 89df7244b272..5cb44f838bde 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -108,6 +108,11 @@ static int dcn314_get_active_display_cnt_wa(
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
+
+ /* Check stream / link detection to ensure that the PHY is active */
+ if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+ display_count++;
+
}
for (i = 0; i < dc->link_count; i++) {
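
The added lines extend the dcn314 active-display workaround: besides noting
TMDS sinks, the loop now also counts any DP stream whose PHY is still
driving (dpms_off not set). A simplified model of the counting rule (the
real function tracks TMDS via a separate flag; the enum here is invented):

    #include <stdbool.h>
    #include <stdio.h>

    enum signal { SIG_HDMI, SIG_DVI, SIG_DP };

    struct stream { enum signal signal; bool dpms_off; };

    static int active_display_cnt(const struct stream *s, int n,
                                  bool *tmds_present)
    {
        int count = 0;

        *tmds_present = false;
        for (int i = 0; i < n; i++) {
            if (s[i].signal == SIG_HDMI || s[i].signal == SIG_DVI)
                *tmds_present = true;
            /* new in this hunk: a DP stream with an active PHY counts */
            else if (s[i].signal == SIG_DP && !s[i].dpms_off)
                count++;
        }
        return count;
    }

    int main(void)
    {
        struct stream s[] = { { SIG_DP, false }, { SIG_DP, true } };
        bool tmds;

        printf("%d\n", active_display_cnt(s, 2, &tmds)); /* 1 */
        return 0;
    }
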
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 61768bf726f8..af108f88b112 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -255,27 +255,60 @@ static void dcn32_update_dppclk_dispclk_freq(struct clk_mgr_internal *clk_mgr, s
}
}
+void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context, bool safe_to_lower)
+{
+ int i;
+
+ clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ int dpp_inst, dppclk_khz, prev_dppclk_khz;
+
+ dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
+ if (context->res_ctx.pipe_ctx[i].plane_res.dpp)
+ dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) {
+ /* dpp == NULL && dppclk_khz == 0 is valid because of pipe harvesting.
+ * In this case just continue the loop.
+ */
+ continue;
+ } else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) {
+ /* The software state is not valid if dpp resource is NULL and
+ * dppclk_khz > 0.
+ */
+ ASSERT(false);
+ continue;
+ }
+
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
+ clk_mgr->dccg->funcs->update_dpp_dto(
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
+ }
+}
+
static void dcn32_update_clocks_update_dentist(
struct clk_mgr_internal *clk_mgr,
- struct dc_state *context,
- uint32_t old_dispclk_khz)
+ struct dc_state *context)
{
uint32_t new_disp_divider = 0;
- uint32_t old_disp_divider = 0;
uint32_t new_dispclk_wdivider = 0;
uint32_t old_dispclk_wdivider = 0;
uint32_t i;
+ uint32_t dentist_dispclk_wdivider_readback = 0;
+ struct dc *dc = clk_mgr->base.ctx->dc;
- if (old_dispclk_khz == 0 || clk_mgr->base.clks.dispclk_khz == 0)
+ if (clk_mgr->base.clks.dispclk_khz == 0)
return;
new_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
- old_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz / old_dispclk_khz;
new_dispclk_wdivider = dentist_get_did_from_divider(new_disp_divider);
- old_dispclk_wdivider = dentist_get_did_from_divider(old_disp_divider);
+ REG_GET(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, &old_dispclk_wdivider);
/* When changing divider to or from 127, some extra programming is required to prevent corruption */
if (old_dispclk_wdivider == 127 && new_dispclk_wdivider != 127) {
@@ -314,6 +347,17 @@ static void dcn32_update_clocks_update_dentist(
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz));
+ if (dc->debug.override_dispclk_programming) {
+ REG_GET(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, &dentist_dispclk_wdivider_readback);
+
+ if (dentist_dispclk_wdivider_readback != 126) {
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, 126);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+ }
+ }
+
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
@@ -341,6 +385,18 @@ static void dcn32_update_clocks_update_dentist(
/* do requested DISPCLK updates*/
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz));
+
+ if (dc->debug.override_dispclk_programming) {
+ REG_GET(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, &dentist_dispclk_wdivider_readback);
+
+ if (dentist_dispclk_wdivider_readback > new_dispclk_wdivider) {
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DISPCLK_WDIVIDER, new_dispclk_wdivider);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+ }
+ }
+
}
static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
@@ -361,7 +417,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
bool p_state_change_support;
bool fclk_p_state_change_support;
int total_plane_count;
- int old_dispclk_khz = clk_mgr_base->clks.dispclk_khz;
if (dc->work_arounds.skip_clock_update)
return;
@@ -503,19 +558,19 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
/* if clock is being lowered, increase DTO before lowering refclk */
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
- dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz);
+ dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn32_update_clocks_update_dentist(clk_mgr, context);
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
- dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz);
+ dcn32_update_clocks_update_dentist(clk_mgr, context);
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
* that we do not lower dto when it is not safe to lower. We do not need to
* compare the current and new dppclk before calling this function.
*/
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
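
The dentist rework above replaces the recomputed old divider with a readback
of DENTIST_DISPCLK_WDIVIDER, which stays correct even when the cached
dispclk value is stale. The divider arithmetic itself is unchanged; a sketch
of it, assuming the usual scale-factor definition (the 0x4000 value is an
assumption for this sketch, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define DENTIST_DIVIDER_RANGE_SCALE_FACTOR 0x4000 /* assumed value */

    /* divider = SCALE * VCO / dispclk; the DID encoding of this result is
     * what ends up in DENTIST_DISPCLK_WDIVIDER. */
    static uint32_t disp_divider(uint32_t vco_khz, uint32_t dispclk_khz)
    {
        if (dispclk_khz == 0)
            return 0; /* mirrors the early return in the driver */
        return (uint32_t)(((uint64_t)DENTIST_DIVIDER_RANGE_SCALE_FACTOR *
                           vco_khz) / dispclk_khz);
    }

    int main(void)
    {
        /* e.g. a 3.6 GHz VCO divided down to 1.8 GHz dispclk -> 2.0x */
        printf("0x%x\n", disp_divider(3600000, 1800000)); /* 0x8000 */
        return 0;
    }
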
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h
index 57e09c7c95f5..186daada7b03 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h
@@ -32,6 +32,9 @@ void dcn32_clk_mgr_construct(struct dc_context *ctx,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg);
+void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context, bool safe_to_lower);
+
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1c218c526650..ae5f1b7b4fef 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -53,7 +53,6 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
-#include "dc_link.h"
#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -74,6 +73,8 @@
#include "dc_trace.h"
+#include "hw_sequencer_private.h"
+
#include "dce/dmub_outbox.h"
#define CTX \
@@ -147,7 +148,7 @@ static void destroy_links(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
if (NULL != dc->links[i])
- link_destroy(&dc->links[i]);
+ dc->link_srv->destroy_link(&dc->links[i]);
}
}
@@ -216,7 +217,7 @@ static bool create_links(
link_init_params.connector_index = i;
link_init_params.link_index = dc->link_count;
link_init_params.dc = dc;
- link = link_create(&link_init_params);
+ link = dc->link_srv->create_link(&link_init_params);
if (link) {
dc->links[dc->link_count] = link;
@@ -238,7 +239,7 @@ static bool create_links(
link_init_params.dc = dc;
link_init_params.is_dpia_link = true;
- link = link_create(&link_init_params);
+ link = dc->link_srv->create_link(&link_init_params);
if (link) {
dc->links[dc->link_count] = link;
link->dc = dc;
@@ -399,6 +400,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
{
int i;
+ /*
+ * Don't adjust DRR while there are bandwidth optimizations pending, to
+ * avoid conflicting with firmware updates.
+ */
+ if (dc->ctx->dce_version > DCE_VERSION_MAX)
+ if (dc->optimized_required || dc->wm_optimized_required)
+ return false;
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
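
The guard added at the top of dc_stream_adjust_vmin_vmax() declines to
reprogram DRR while a bandwidth or watermark optimization is outstanding,
since both paths touch the same firmware state. The predicate in isolation
(field names mirror the hunk; the version constant is a stand-in):

    #include <stdbool.h>

    enum { DCE_VERSION_MAX = 100 }; /* stand-in boundary: DCE vs. DCN */

    struct dc_flags {
        int dce_version;
        bool optimized_required;
        bool wm_optimized_required;
    };

    /* On DCN (dce_version > DCE_VERSION_MAX), defer DRR changes while any
     * optimization is still pending; DCE keeps the old unconditional path. */
    static bool drr_adjust_allowed(const struct dc_flags *f)
    {
        if (f->dce_version > DCE_VERSION_MAX &&
            (f->optimized_required || f->wm_optimized_required))
            return false;
        return true;
    }

    int main(void)
    {
        struct dc_flags f = { DCE_VERSION_MAX + 1, true, false };

        return drr_adjust_allowed(&f); /* exits 0: adjustment deferred */
    }
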
@@ -814,6 +823,9 @@ static void dc_destruct(struct dc *dc)
dc_destroy_resource_pool(dc);
+ if (dc->link_srv)
+ link_destroy_link_service(&dc->link_srv);
+
if (dc->ctx->gpio_service)
dal_gpio_service_destroy(&dc->ctx->gpio_service);
@@ -973,6 +985,8 @@ static bool dc_construct(struct dc *dc,
goto fail;
}
+ dc->link_srv = link_create_link_service();
+
dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
if (!dc->res_pool)
goto fail;
@@ -984,7 +998,7 @@ static bool dc_construct(struct dc *dc,
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
if (dc->res_pool->funcs->update_bw_bounding_box) {
@@ -1057,6 +1071,53 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *contex
}
}
+static void phantom_pipe_blank(
+ struct dc *dc,
+ struct timing_generator *tg,
+ int width,
+ int height)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ struct output_pixel_processor *opp = NULL;
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+ uint32_t otg_active_width, otg_active_height;
+ uint32_t i;
+
+ /* program opp dpg blank color */
+ color_space = COLOR_SPACE_SRGB;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ otg_active_width = width;
+ otg_active_height = height;
+
+ /* get the OPTC source */
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+ ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+ opp = dc->res_pool->opps[i];
+ break;
+ }
+ }
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator)
+ opp->funcs->opp_set_disp_pattern_generator(
+ opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+
+ if (tg->funcs->is_tg_enabled(tg))
+ hws->funcs.wait_for_blank_complete(opp);
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -1115,8 +1176,14 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* again for different use.
*/
if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
- if (tg->funcs->enable_crtc)
+ if (tg->funcs->enable_crtc) {
+ int main_pipe_width, main_pipe_height;
+
+ main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
+ main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+ phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
tg->funcs->enable_crtc(tg);
+ }
}
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
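
One subtle step in the new phantom_pipe_blank(): it asks the OTG for its
source OPP instance and then scans the resource pool for the OPP carrying
that instance before programming the solid-color pattern. The lookup,
modeled standalone (the struct layout is invented for the sketch):

    #include <stddef.h>
    #include <stdio.h>

    struct opp { unsigned int inst; };

    /* Find the pool OPP whose instance matches the OTG's source OPP id. */
    static struct opp *find_opp(struct opp *const *pool, unsigned int num_opp,
                                unsigned int opp_id_src0)
    {
        for (unsigned int i = 0; i < num_opp; i++)
            if (pool[i] && pool[i]->inst == opp_id_src0)
                return pool[i];
        return NULL; /* callers must tolerate a miss, as the driver does */
    }

    int main(void)
    {
        struct opp a = { 0 }, b = { 2 };
        struct opp *pool[] = { &a, &b };
        struct opp *hit = find_opp(pool, 2, 2);

        printf("%u\n", hit ? hit->inst : 999u); /* 2 */
        return 0;
    }
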
@@ -1199,7 +1266,7 @@ static void disable_vbios_mode_if_required(
pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
if (pix_clk_100hz != requested_pix_clk_100hz) {
- link_set_dpms_off(pipe);
+ dc->link_srv->set_dpms_off(pipe);
pipe->stream->dpms_off = false;
}
}
@@ -1298,7 +1365,7 @@ static void detect_edp_presence(struct dc *dc)
int i;
int edp_num;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (!edp_num)
return;
@@ -1324,16 +1391,12 @@ void dc_hardware_init(struct dc *dc)
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params)
{
-#ifdef CONFIG_DRM_AMD_DC_HDCP
dc->ctx->cp_psp = init_params->cp_psp;
-#endif
}
void dc_deinit_callbacks(struct dc *dc)
{
-#ifdef CONFIG_DRM_AMD_DC_HDCP
memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
-#endif
}
void dc_destroy(struct dc **dc)
@@ -1658,7 +1721,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
- if (link_is_edp_ilr_optimization_required(link, crtc_timing)) {
+ if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
return false;
}
@@ -2001,53 +2064,6 @@ context_alloc_fail:
return res;
}
-/* TODO: When the transition to the new commit sequence is done, remove this
- * function in favor of dc_commit_streams. */
-bool dc_commit_state(struct dc *dc, struct dc_state *context)
-{
- enum dc_status result = DC_ERROR_UNEXPECTED;
- int i;
-
- /* TODO: Since change commit sequence can have a huge impact,
- * we decided to only enable it for DCN3x. However, as soon as
- * we get more confident about this change we'll need to enable
- * the new sequence for all ASICs. */
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- result = dc_commit_streams(dc, context->streams, context->stream_count);
- return result == DC_OK;
- }
-
- if (!streams_changed(dc, context->streams, context->stream_count)) {
- return DC_OK;
- }
-
- DC_LOG_DC("%s: %d streams\n",
- __func__, context->stream_count);
-
- for (i = 0; i < context->stream_count; i++) {
- struct dc_stream_state *stream = context->streams[i];
-
- dc_stream_log(dc, stream);
- }
-
- /*
- * Previous validation was performed with fast_validation = true and
- * the full DML state required for hardware programming was skipped.
- *
- * Re-validate here to calculate these parameters / watermarks.
- */
- result = dc_validate_global_state(dc, context, false);
- if (result != DC_OK) {
- DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
- dc_status_to_str(result), result);
- return result;
- }
-
- result = dc_commit_state_no_check(dc, context);
-
- return (result == DC_OK);
-}
-
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
struct dc_stream_state *stream,
@@ -2134,27 +2150,33 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
- if (dc->ctx->dce_version >= DCE_VERSION_MAX)
- TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
- else
+ /*
+ * Only relevant for DCN behavior where we can guarantee the optimization
+ * is safe to apply - retain the legacy behavior for DCE.
+ */
+
+ if (dc->ctx->dce_version < DCE_VERSION_MAX)
TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ else {
+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
- if (is_flip_pending_in_pipes(dc, context))
- return;
+ if (is_flip_pending_in_pipes(dc, context))
+ return;
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].stream == NULL ||
- context->res_ctx.pipe_ctx[i].plane_state == NULL) {
- context->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
- }
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+ context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+ context->res_ctx.pipe_ctx[i].pipe_idx = i;
+ dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+ }
- process_deferred_updates(dc);
+ process_deferred_updates(dc);
- dc->hwss.optimize_bandwidth(dc, context);
+ dc->hwss.optimize_bandwidth(dc, context);
- if (dc->debug.enable_double_buffered_dsc_pg_support)
- dc->hwss.update_dsc_pg(dc, context, true);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+ }
dc->optimized_required = false;
dc->wm_optimized_required = false;
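
The rewritten dc_post_update_surfaces_to_stream() keeps DCE on the old
trace-only path and nests the whole optimization sequence under the DCN
branch. The ordering inside that branch is the point: bail while a flip is
in flight, disable orphaned pipes, flush deferred updates, then lower
bandwidth. That control flow, reduced to stubs:

    #include <stdbool.h>

    static bool flip_pending(void)              { return false; }
    static void disable_orphaned_pipes(void)    { }
    static void process_deferred_updates(void)  { }
    static void optimize_bandwidth(void)        { }

    /* DCN-only post-update path: never lower clocks under an active flip;
     * the optimization simply runs again on a later frame. */
    static void post_update(bool is_dcn)
    {
        if (!is_dcn)
            return; /* DCE: trace only, legacy behavior retained */
        if (flip_pending())
            return;
        disable_orphaned_pipes();
        process_deferred_updates();
        optimize_bandwidth();
    }

    int main(void)
    {
        post_update(true);
        return 0;
    }
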
@@ -3173,7 +3195,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
+ DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
}
if (stream_update->hdr_static_metadata &&
@@ -3209,13 +3233,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
continue;
if (stream_update->dsc_config)
- link_update_dsc_config(pipe_ctx);
+ dc->link_srv->update_dsc_config(pipe_ctx);
if (stream_update->mst_bw_update) {
if (stream_update->mst_bw_update->is_increase)
- link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ dc->link_srv->increase_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
else
- link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ dc->link_srv->reduce_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
}
if (stream_update->pending_test_pattern) {
@@ -3229,7 +3255,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
- link_set_dpms_off(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
/* for dpms, keep acquired resources*/
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
@@ -3239,7 +3265,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
} else {
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
- link_set_dpms_on(dc->current_state, pipe_ctx);
+ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
}
}
@@ -3510,14 +3536,9 @@ static void commit_planes_for_stream(struct dc *dc,
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- }
-
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
+ NULL, subvp_prev_use);
return;
}
@@ -4083,24 +4104,30 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_context *dc_ctx = dc->ctx;
int i, j;
+ stream_status = dc_stream_get_status(stream);
+ context = dc->current_state;
+
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
* we get more confident about this change we'll need to enable
* the new sequence for all ASICs.
*/
if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
dc_update_planes_and_stream(dc, srf_updates,
surface_count, stream,
stream_update);
return;
}
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
-
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
@@ -4123,12 +4150,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
- } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+ } else if (update_type == UPDATE_TYPE_FAST) {
/*
* Previous frame finished and HW is ready for optimization.
- *
- * Only relevant for DCN behavior where we can guarantee the optimization
- * is safe to apply - retain the legacy behavior for DCE.
*/
dc_post_update_surfaces_to_stream(dc);
}
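
The reorder in dc_commit_updates_for_stream() moves the update
classification ahead of the ASIC branch, so the DCN3.2+ path can flush a
pending optimization for fast updates before delegating to
dc_update_planes_and_stream(). The new flow in miniature (all callees
stubbed):

    enum update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_FULL };

    static enum update_type check_update(void)    { return UPDATE_TYPE_FAST; }
    static void post_update_surfaces(void)        { }
    static void update_planes_and_stream(void)    { }
    static void legacy_commit(enum update_type t) { (void)t; }

    static void commit_updates(int is_dcn32_or_newer)
    {
        /* classify first: both branches now need the result */
        enum update_type t = check_update();

        if (is_dcn32_or_newer) {
            if (t == UPDATE_TYPE_FAST)
                post_update_surfaces(); /* previous frame done, HW ready */
            update_planes_and_stream();
            return;
        }
        legacy_commit(t);
    }

    int main(void)
    {
        commit_updates(1);
        return 0;
    }
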
@@ -4305,7 +4329,7 @@ void dc_resume(struct dc *dc)
uint32_t i;
for (i = 0; i < dc->link_count; i++)
- link_resume(dc->links[i]);
+ dc->link_srv->resume(dc->links[i]);
}
bool dc_is_dmcu_initialized(struct dc *dc)
@@ -4317,157 +4341,6 @@ bool dc_is_dmcu_initialized(struct dc *dc)
return false;
}
-bool dc_is_oem_i2c_device_present(
- struct dc *dc,
- size_t slave_address)
-{
- if (dc->res_pool->oem_device)
- return dce_i2c_oem_device_present(
- dc->res_pool,
- dc->res_pool->oem_device,
- slave_address);
-
- return false;
-}
-
-bool dc_submit_i2c(
- struct dc *dc,
- uint32_t link_index,
- struct i2c_command *cmd)
-{
-
- struct dc_link *link = dc->links[link_index];
- struct ddc_service *ddc = link->ddc;
- return dce_i2c_submit_command(
- dc->res_pool,
- ddc->ddc_pin,
- cmd);
-}
-
-bool dc_submit_i2c_oem(
- struct dc *dc,
- struct i2c_command *cmd)
-{
- struct ddc_service *ddc = dc->res_pool->oem_device;
- if (ddc)
- return dce_i2c_submit_command(
- dc->res_pool,
- ddc->ddc_pin,
- cmd);
-
- return false;
-}
-
-static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
-{
- if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- dc_sink_retain(sink);
-
- dc_link->remote_sinks[dc_link->sink_count] = sink;
- dc_link->sink_count++;
-
- return true;
-}
-
-/*
- * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
- *
- * EDID length is in bytes
- */
-struct dc_sink *dc_link_add_remote_sink(
- struct dc_link *link,
- const uint8_t *edid,
- int len,
- struct dc_sink_init_data *init_data)
-{
- struct dc_sink *dc_sink;
- enum dc_edid_status edid_status;
-
- if (len > DC_MAX_EDID_BUFFER_SIZE) {
- dm_error("Max EDID buffer size breached!\n");
- return NULL;
- }
-
- if (!init_data) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- if (!init_data->link) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- dc_sink = dc_sink_create(init_data);
-
- if (!dc_sink)
- return NULL;
-
- memmove(dc_sink->dc_edid.raw_edid, edid, len);
- dc_sink->dc_edid.length = len;
-
- if (!link_add_remote_sink_helper(
- link,
- dc_sink))
- goto fail_add_sink;
-
- edid_status = dm_helpers_parse_edid_caps(
- link,
- &dc_sink->dc_edid,
- &dc_sink->edid_caps);
-
- /*
- * Treat device as no EDID device if EDID
- * parsing fails
- */
- if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
- dc_sink->dc_edid.length = 0;
- dm_error("Bad EDID, status%d!\n", edid_status);
- }
-
- return dc_sink;
-
-fail_add_sink:
- dc_sink_release(dc_sink);
- return NULL;
-}
-
-/*
- * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
- *
- * Note that this just removes the struct dc_sink - it doesn't
- * program hardware or alter other members of dc_link
- */
-void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
-{
- int i;
-
- if (!link->sink_count) {
- BREAK_TO_DEBUGGER();
- return;
- }
-
- for (i = 0; i < link->sink_count; i++) {
- if (link->remote_sinks[i] == sink) {
- dc_sink_release(sink);
- link->remote_sinks[i] = NULL;
-
- /* shrink array to remove empty place */
- while (i < link->sink_count - 1) {
- link->remote_sinks[i] = link->remote_sinks[i+1];
- i++;
- }
- link->remote_sinks[i] = NULL;
- link->sink_count--;
- return;
- }
- }
-}
-
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
@@ -4990,7 +4863,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
return;
}
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
/* Determine panel inst */
for (i = 0; i < edp_num; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 652270a0b498..2acbf692193f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -73,28 +73,38 @@ struct out_csc_color_matrix_type {
static const struct out_csc_color_matrix_type output_csc_matrix[] = {
{ COLOR_SPACE_RGB_TYPE,
- { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ { 0x2000, 0, 0, 0,
+ 0, 0x2000, 0, 0,
+ 0, 0, 0x2000, 0} },
{ COLOR_SPACE_RGB_LIMITED_TYPE,
- { 0x1B67, 0, 0, 0x201, 0, 0x1B67, 0, 0x201, 0, 0, 0x1B67, 0x201} },
+ { 0x1B67, 0, 0, 0x201,
+ 0, 0x1B67, 0, 0x201,
+ 0, 0, 0x1B67, 0x201} },
{ COLOR_SPACE_YCBCR601_TYPE,
- { 0xE04, 0xF444, 0xFDB9, 0x1004, 0x831, 0x1016, 0x320, 0x201, 0xFB45,
- 0xF6B7, 0xE04, 0x1004} },
+ { 0xE04, 0xF444, 0xFDB9, 0x1004,
+ 0x831, 0x1016, 0x320, 0x201,
+ 0xFB45, 0xF6B7, 0xE04, 0x1004} },
{ COLOR_SPACE_YCBCR709_TYPE,
- { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
- 0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
+ { 0xE04, 0xF345, 0xFEB7, 0x1004,
+ 0x5D3, 0x1399, 0x1FA, 0x201,
+ 0xFCCA, 0xF533, 0xE04, 0x1004} },
/* TODO: correct values below */
{ COLOR_SPACE_YCBCR601_LIMITED_TYPE,
- { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
- 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+ { 0xE00, 0xF447, 0xFDB9, 0x1000,
+ 0x991, 0x12C9, 0x3A6, 0x200,
+ 0xFB47, 0xF6B9, 0xE00, 0x1000} },
{ COLOR_SPACE_YCBCR709_LIMITED_TYPE,
- { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
- 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+ { 0xE00, 0xF349, 0xFEB7, 0x1000,
+ 0x6CE, 0x16E3, 0x24F, 0x200,
+ 0xFCCB, 0xF535, 0xE00, 0x1000} },
{ COLOR_SPACE_YCBCR2020_TYPE,
- { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2,
- 0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} },
+ { 0x1000, 0xF149, 0xFEB7, 0x1004,
+ 0x0868, 0x15B2, 0x01E6, 0x201,
+ 0xFB88, 0xF478, 0x1000, 0x1004} },
{ COLOR_SPACE_YCBCR709_BLACK_TYPE,
- { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
- 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
+ { 0x0000, 0x0000, 0x0000, 0x1000,
+ 0x0000, 0x0000, 0x0000, 0x0200,
+ 0x0000, 0x0000, 0x0000, 0x1000} },
};
static bool is_rgb_type(
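
The reflowed matrices above now read as three rows of four: three color
coefficients plus an offset per output channel, in signed fixed point where
0x2000 appears to represent 1.0 (judging by the RGB identity entry). A
sketch of applying one such 3x4 matrix under that inferred convention:

    #include <stdint.h>
    #include <stdio.h>

    #define CSC_ONE 0x2000 /* inferred scale: identity uses 0x2000 = 1.0 */

    /* out[r] = (sum over c of m[4r+c] * in[c]) / CSC_ONE + offset m[4r+3] */
    static void apply_csc(const int16_t m[12], const int32_t in[3],
                          int32_t out[3])
    {
        for (int r = 0; r < 3; r++) {
            int64_t acc = 0;

            for (int c = 0; c < 3; c++)
                acc += (int64_t)m[4 * r + c] * in[c];
            out[r] = (int32_t)(acc / CSC_ONE) + m[4 * r + 3];
        }
    }

    int main(void)
    {
        const int16_t identity[12] = { 0x2000, 0, 0, 0,
                                       0, 0x2000, 0, 0,
                                       0, 0, 0x2000, 0 };
        int32_t in[3] = { 100, 150, 200 }, out[3];

        apply_csc(identity, in, out);
        printf("%d %d %d\n", out[0], out[1], out[2]); /* 100 150 200 */
        return 0;
    }
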
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 74e465ba158d..41198c729d90 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -48,7 +48,7 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
/* DIGs do not support DP2.0 streams with 128b/132b encoding. */
struct dc_link_settings link_settings = {0};
- link_decide_link_settings(stream, &link_settings);
+ stream->ctx->dc->link_srv->dp_decide_link_settings(stream, &link_settings);
if ((link_settings.link_rate >= LINK_RATE_LOW) &&
link_settings.link_rate <= LINK_RATE_HIGH3) {
is_dig_stream = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index a951e10416ee..58fa911b1417 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -34,70 +34,443 @@
* in this file which calls link functions.
*/
#include "link.h"
+#include "dce/dce_i2c.h"
+struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+{
+ return dc->links[link_index];
+}
+
+void dc_get_edp_links(const struct dc *dc,
+ struct dc_link **edp_links,
+ int *edp_num)
+{
+ int i;
+
+ *edp_num = 0;
+ for (i = 0; i < dc->link_count; i++) {
+ // report any eDP links, even unconnected DDIs
+ if (!dc->links[i])
+ continue;
+ if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
+ edp_links[*edp_num] = dc->links[i];
+ if (++(*edp_num) == MAX_NUM_EDP)
+ return;
+ }
+ }
+}
+
+bool dc_get_edp_link_panel_inst(const struct dc *dc,
+ const struct dc_link *link,
+ unsigned int *inst_out)
+{
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num, i;
+
+ *inst_out = 0;
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ return false;
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ for (i = 0; i < edp_num; i++) {
+ if (link == edp_links[i])
+ break;
+ (*inst_out)++;
+ }
+ return true;
+}
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
{
- return link_detect(link, reason);
+ return link->dc->link_srv->detect_link(link, reason);
}
bool dc_link_detect_connection_type(struct dc_link *link,
enum dc_connection_type *type)
{
- return link_detect_connection_type(link, type);
+ return link->dc->link_srv->detect_connection_type(link, type);
}
const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
{
- return link_get_status(link);
+ return link->dc->link_srv->get_status(link);
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
/* return true if the connected receiver supports the hdcp version */
bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal)
{
- return link_is_hdcp14(link, signal);
+ return link->dc->link_srv->is_hdcp1x_supported(link, signal);
}
bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal)
{
- return link_is_hdcp22(link, signal);
+ return link->dc->link_srv->is_hdcp2x_supported(link, signal);
}
-#endif
void dc_link_clear_dprx_states(struct dc_link *link)
{
- link_clear_dprx_states(link);
+ link->dc->link_srv->clear_dprx_states(link);
}
bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link)
{
- return link_reset_cur_dp_mst_topology(link);
+ return link->dc->link_srv->reset_cur_dp_mst_topology(link);
}
uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_settings)
{
- return dp_link_bandwidth_kbps(link, link_settings);
-}
-
-uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
-{
- return link_timing_bandwidth_kbps(timing);
+ return link->dc->link_srv->dp_link_bandwidth_kbps(link, link_settings);
}
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
{
- link_get_cur_res_map(dc, map);
+ dc->link_srv->get_cur_res_map(dc, map);
}
void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
{
- link_restore_res_map(dc, map);
+ dc->link_srv->restore_res_map(dc, map);
}
bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx)
{
- return link_update_dsc_config(pipe_ctx);
+ struct dc_link *link = pipe_ctx->stream->link;
+
+ return link->dc->link_srv->update_dsc_config(pipe_ctx);
+}
+
+bool dc_is_oem_i2c_device_present(
+ struct dc *dc,
+ size_t slave_address)
+{
+ if (dc->res_pool->oem_device)
+ return dce_i2c_oem_device_present(
+ dc->res_pool,
+ dc->res_pool->oem_device,
+ slave_address);
+
+ return false;
+}
+
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd)
+{
+ struct dc_link *link = dc->links[link_index];
+ struct ddc_service *ddc = link->ddc;
+
+ return dce_i2c_submit_command(
+ dc->res_pool,
+ ddc->ddc_pin,
+ cmd);
+}
+
+bool dc_submit_i2c_oem(
+ struct dc *dc,
+ struct i2c_command *cmd)
+{
+ struct ddc_service *ddc = dc->res_pool->oem_device;
+
+ if (ddc)
+ return dce_i2c_submit_command(
+ dc->res_pool,
+ ddc->ddc_pin,
+ cmd);
+
+ return false;
+}
+
+void dc_link_dp_handle_automated_test(struct dc_link *link)
+{
+ link->dc->link_srv->dp_handle_automated_test(link);
+}
+
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ return link->dc->link_srv->dp_set_test_pattern(link, test_pattern,
+ test_pattern_color_space, p_link_settings,
+ p_custom_pattern, cust_pattern_size);
+}
+
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ struct dc_link *link)
+{
+ struct link_resource link_res;
+
+ dc->link_srv->get_cur_link_res(link, &link_res);
+ dc->link_srv->dp_set_drive_settings(link, &link_res, lt_settings);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ dc->link_srv->dp_set_preferred_link_settings(dc, link_setting, link);
+}
+
+void dc_link_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain)
+{
+ dc->link_srv->dp_set_preferred_training_settings(dc, link_setting,
+ lt_overrides, link, skip_immediate_retrain);
+}
+
+bool dc_dp_trace_is_initialized(struct dc_link *link)
+{
+ return link->dc->link_srv->dp_trace_is_initialized(link);
+}
+
+void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+ bool in_detection,
+ bool is_logged)
+{
+ link->dc->link_srv->dp_trace_set_is_logged_flag(link, in_detection, is_logged);
+}
+
+bool dc_dp_trace_is_logged(struct dc_link *link, bool in_detection)
+{
+ return link->dc->link_srv->dp_trace_is_logged(link, in_detection);
+}
+
+unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+ bool in_detection)
+{
+ return link->dc->link_srv->dp_trace_get_lt_end_timestamp(link, in_detection);
+}
+
+const struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+ bool in_detection)
+{
+ return link->dc->link_srv->dp_trace_get_lt_counts(link, in_detection);
+}
+
+unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link)
+{
+ return link->dc->link_srv->dp_trace_get_link_loss_count(link);
+}
+
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data)
+{
+ return link->dc->link_srv->add_remote_sink(link, edid, len, init_data);
+}
+
+void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+{
+ link->dc->link_srv->remove_remote_sink(link, sink);
+}
+
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result)
+{
+ const struct dc *dc = ddc->link->dc;
+
+ return dc->link_srv->aux_transfer_raw(
+ ddc, payload, operation_result);
+}
+
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(
+ struct dc *dc, uint8_t bw)
+{
+ return dc->link_srv->bw_kbps_from_raw_frl_link_rate_data(bw);
+}
+
+bool dc_link_decide_edp_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ return link->dc->link_srv->edp_decide_link_settings(link, link_setting, req_bw);
+}
+
+bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap)
+{
+ return link->dc->link_srv->dp_get_max_link_enc_cap(link, max_link_enc_cap);
+}
+
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
+ const struct dc_link *link)
+{
+ return link->dc->link_srv->mst_decide_link_encoding_format(link);
+}
+
+const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_get_verified_link_cap(link);
+}
+
+bool dc_link_is_dp_sink_present(struct dc_link *link)
+{
+ return link->dc->link_srv->dp_is_sink_present(link);
+}
+
+bool dc_link_is_fec_supported(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_is_fec_supported(link);
+}
+
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link)
+{
+ link->dc->link_srv->dp_overwrite_extended_receiver_cap(link);
+}
+
+bool dc_link_should_enable_fec(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_should_enable_fec(link);
+}
+
+int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+ struct dc_link *link, int peak_bw)
+{
+ return link->dc->link_srv->dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw);
+}
+
+void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
+{
+ link->dc->link_srv->dpia_handle_bw_alloc_response(link, bw, result);
+}
+
+bool dc_link_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ return link->dc->link_srv->dp_parse_link_loss_status(link, hpd_irq_dpcd_data);
+}
+
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
+{
+ return link->dc->link_srv->dp_should_allow_hpd_rx_irq(link);
+}
+
+void dc_link_dp_handle_link_loss(struct dc_link *link)
+{
+ link->dc->link_srv->dp_handle_link_loss(link);
+}
+
+enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ return link->dc->link_srv->dp_read_hpd_rx_irq_data(link, irq_data);
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work)
+{
+ return link->dc->link_srv->dp_handle_hpd_rx_irq(link, out_hpd_irq_dpcd_data,
+ out_link_loss, defer_handling, has_left_work);
+}
+
+void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on)
+{
+ link->dc->link_srv->dpcd_write_rx_power_ctrl(link, on);
+}
+
+enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting)
+{
+ return link->dc->link_srv->dp_decide_lttpr_mode(link, link_setting);
+}
+
+void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
+{
+ link->dc->link_srv->edp_panel_backlight_power_on(link, wait_for_hpd);
+}
+
+int dc_link_get_backlight_level(const struct dc_link *link)
+{
+ return link->dc->link_srv->edp_get_backlight_level(link);
+}
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak)
+{
+ return link->dc->link_srv->edp_get_backlight_level_nits(link,
+ backlight_millinits_avg,
+ backlight_millinits_peak);
+}
+
+bool dc_link_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
+{
+ return link->dc->link_srv->edp_set_backlight_level(link,
+ backlight_pwm_u16_16, frame_ramp);
+}
+
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms)
+{
+ return link->dc->link_srv->edp_set_backlight_level_nits(link, isHDR,
+ backlight_millinits, transition_time_in_ms);
+}
+
+int dc_link_get_target_backlight_pwm(const struct dc_link *link)
+{
+ return link->dc->link_srv->edp_get_target_backlight_pwm(link);
+}
+
+bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
+{
+ return link->dc->link_srv->edp_get_psr_state(link, state);
+}
+
+bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
+{
+ return link->dc->link_srv->edp_set_psr_allow_active(link, allow_active, wait,
+ force_static, power_opts);
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context);
+}
+
+bool dc_link_wait_for_t12(struct dc_link *link)
+{
+ return link->dc->link_srv->edp_wait_for_t12(link);
+}
+
+bool dc_link_get_hpd_state(struct dc_link *link)
+{
+ return link->dc->link_srv->get_hpd_state(link);
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+ link->dc->link_srv->enable_hpd(link);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+ link->dc->link_srv->disable_hpd(link);
+}
+
+void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+{
+ link->dc->link_srv->enable_hpd_filter(link, enable);
}
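
Every function in dc_link_exports.c now follows the same pattern: the public
dc_link_* symbol is a one-line forwarder through the dc->link_srv function
table, so the link component is reachable only via that table. The pattern
as a standalone sketch (all names illustrative, not the real API):

    #include <stdbool.h>
    #include <stdio.h>

    struct dc_link;

    /* The function table owned by the link component. */
    struct link_service {
        bool (*detect_link)(struct dc_link *link, int reason);
    };

    struct dc      { const struct link_service *link_srv; };
    struct dc_link { struct dc *dc; bool connected; };

    /* Exported wrapper: forwards through the table, exposes no internals. */
    static bool dc_link_detect(struct dc_link *link, int reason)
    {
        return link->dc->link_srv->detect_link(link, reason);
    }

    static bool detect_impl(struct dc_link *link, int reason)
    {
        (void)reason;
        return link->connected;
    }

    int main(void)
    {
        static const struct link_service srv = { detect_impl };
        struct dc dc = { &srv };
        struct dc_link link = { &dc, true };

        printf("%d\n", dc_link_detect(&link, 0)); /* 1 */
        return 0;
    }
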
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index d9f2ef242b0f..85d54bfb595c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -232,7 +232,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
init_data->num_virtual_links, dc);
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
res_pool = dcn10_create_resource_pool(init_data, dc);
@@ -276,7 +276,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_3_21:
res_pool = dcn321_create_resource_pool(init_data, dc);
break;
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP */
default:
break;
}
@@ -2213,7 +2213,7 @@ enum dc_status dc_remove_stream_from_ctx(
del_pipe->stream_res.stream_enc,
false);
- if (link_is_dp_128b_132b_signal(del_pipe)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(del_pipe)) {
update_hpo_dp_stream_engine_usage(
&new_ctx->res_ctx, dc->res_pool,
del_pipe->stream_res.hpo_dp_stream_enc,
@@ -2513,9 +2513,10 @@ enum dc_status resource_map_pool_resources(
* and link settings
*/
if (dc_is_dp_signal(stream->signal)) {
- if (!link_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
+ if (!dc->link_srv->dp_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
return DC_FAIL_DP_LINK_BANDWIDTH;
- if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
find_first_free_match_hpo_dp_stream_enc_for_link(
&context->res_ctx, pool, stream);
@@ -3685,7 +3686,7 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
/* TODO: validate audio ASIC caps, encoder */
if (res == DC_OK)
- res = link_validate_mode_timing(stream,
+ res = dc->link_srv->validate_mode_timing(stream,
link,
&stream->timing);
@@ -3812,7 +3813,7 @@ bool get_temp_dp_link_res(struct dc_link *link,
memset(link_res, 0, sizeof(*link_res));
- if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx,
dc->res_pool, link);
if (!link_res->hpo_dp_link_enc)
@@ -4027,14 +4028,14 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
else
sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
if (sec_pipe->stream->timing.flags.DSC == 1) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
#endif
ASSERT(sec_pipe->stream_res.dsc);
if (sec_pipe->stream_res.dsc == NULL)
return false;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
#endif
}
@@ -4046,7 +4047,7 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct dc_state *context,
struct pipe_ctx *pipe_ctx)
{
- if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
find_first_free_match_hpo_dp_stream_enc_for_link(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index cde8ed2560b3..eda2152dcd1f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,9 +47,7 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_z10_save_init(dc);
-#endif
}
return num_vmids;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1fde43378689..2818483964dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -29,9 +29,7 @@
#include "dc_types.h"
#include "grph_object_defs.h"
#include "logger_types.h"
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-#include "hdcp_types.h"
-#endif
+#include "hdcp_msg_types.h"
#include "gpio_types.h"
#include "link_service_types.h"
#include "grph_object_ctrl_defs.h"
@@ -47,7 +45,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.223"
+#define DC_VER "3.2.227"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -84,8 +82,6 @@ enum det_size {
struct dc_plane_cap {
enum dc_plane_type type;
- uint32_t blends_with_above : 1;
- uint32_t blends_with_below : 1;
uint32_t per_pixel_alpha : 1;
struct {
uint32_t argb8888 : 1;
@@ -716,6 +712,7 @@ struct dc_bounding_box_overrides {
struct dc_state;
struct resource_pool;
struct dce_hwseq;
+struct link_service;
/**
* struct dc_debug_options - DC debug struct
@@ -795,6 +792,7 @@ struct dc_debug_options {
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int seamless_boot_odm_combine;
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
+ int minimum_z8_residency_time;
bool disable_z9_mpc;
unsigned int force_fclk_khz;
bool enable_tri_buf;
@@ -874,6 +872,7 @@ struct dc_debug_options {
bool disable_unbounded_requesting;
bool dig_fifo_off_in_blank;
bool temp_mst_deallocation_sequence;
+ bool override_dispclk_programming;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -890,6 +889,7 @@ struct dc {
uint8_t link_count;
struct dc_link *links[MAX_PIPES * 2];
+ struct link_service *link_srv;
struct dc_state *current_state;
struct resource_pool *res_pool;
@@ -991,11 +991,7 @@ struct dc_init_data {
};
struct dc_callback_init {
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct cp_psp cp_psp;
-#else
- uint8_t reserved;
-#endif
};
struct dc *dc_create(const struct dc_init_data *init_params);
@@ -1362,10 +1358,6 @@ enum dc_status dc_commit_streams(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count);
-/* TODO: When the transition to the new commit sequence is done, remove this
- * function in favor of dc_commit_streams. */
-bool dc_commit_state(struct dc *dc, struct dc_state *context);
-
struct dc_state *dc_create_state(struct dc *dc);
struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
@@ -1378,9 +1370,163 @@ struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+/* Return the minimum bandwidth required to drive a given timing.
+ * return - minimum required timing bandwidth in kbps.
+ */
+uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing);
+
/* Link Interfaces */
-/* TODO: remove this after resolving external dependencies */
-#include "dc_link.h"
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx; /* aka DP Short Pulse */
+
+ bool is_hpd_filter_disabled;
+ bool dp_ss_off;
+
+ /**
+ * @link_state_valid:
+ *
+ * If there is no link and local sink, this variable should be set to
+ * false. Otherwise, it should be set to true; usually, the function
+ * core_link_enable_stream sets this field to true.
+ */
+ bool link_state_valid;
+ bool aux_access_disabled;
+ bool sync_lt_in_progress;
+ bool skip_stream_reenable;
+ bool is_internal_display;
+ /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
+ bool is_dig_mapping_flexible;
+ bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a newly received HPD */
+ bool is_automated; /* Indicates automated testing */
+
+ bool edp_sink_present;
+
+ struct dp_trace dp_trace;
+
+ /* caps is the same as reported_link_cap. link training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
+ struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
+ struct dc_link_training_overrides preferred_training_settings;
+ struct dp_audio_test_data audio_test_data;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+ /* DIG link encoder ID. Used as index in link encoder resource pool.
+ * For links with fixed mapping to DIG, this is not changed after dc_link
+ * object creation.
+ */
+ enum engine_id eng_id;
+
+ bool test_pattern_enabled;
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct panel_cntl *panel_cntl;
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ /* Endpoint type distinguishes display endpoints which do not have entries
+ * in the BIOS connector table from those that do. Helps when tracking link
+ * encoder to display endpoint assignments.
+ */
+ enum display_endpoint_type ep_type;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ uint32_t dongle_max_pix_clk;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ struct hdcp_caps hdcp_caps;
+ enum edp_revision edp_revision;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+
+ struct psr_settings psr_settings;
+
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
+ /* Vendor specific LTTPR workaround variables */
+ uint8_t vendor_specific_lttpr_link_rate_wa;
+ bool apply_vendor_specific_lttpr_link_rate_wa;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
+ bool dp_skip_fs_144hz;
+ bool dp_mot_reset_segment;
+ /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+ bool dpia_mst_dsc_always_on;
+ /* Forced DPIA into TBT3 compatibility mode. */
+ bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
+ } wa_flags;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+ struct dprx_states dprx_states;
+
+ struct gpio *hpd_gpio;
+ enum dc_link_fec_state fec_state;
+ bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
+
+ struct dc_panel_config panel_config;
+ struct phy_state phy_state;
+ // BW ALLOCATION USB4 ONLY
+ struct dc_dpia_bw_alloc dpia_bw_alloc_config;
+};
+
+/* Return an enumerated dc_link.
+ * dc_link order is constant and determined at boot time. Links cannot be
+ * created or destroyed. Use dc_get_caps() to get the number of links.
+ */
+struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index);
+
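/* Usage sketch (assumption, mirroring the inline helper removed from
 * dc_link.h): enumerate links through the accessor rather than touching
 * dc->links directly.
 */
static void dm_for_each_link(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc_get_link_at_index(dc, i);

		if (!link)
			continue;
		/* per-link handling goes here */
	}
}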
+/* Get the instance id of the given eDP link via inst_out. Inst 0 is the
+ * primary eDP link. Returns false if the link is not eDP.
+ */
+bool dc_get_edp_link_panel_inst(const struct dc *dc,
+ const struct dc_link *link,
+ unsigned int *inst_out);
+
+/* Return an array of link pointers to edp links. */
+void dc_get_edp_links(const struct dc *dc,
+ struct dc_link **edp_links,
+ int *edp_num);
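/* Hypothetical caller sketch: collect the eDP links and power on their panel
 * backlights, bounding the array with MAX_NUM_EDP as the inline get_edp_links()
 * helper removed from dc_link.h did. dc_link_edp_panel_backlight_power_on() is
 * declared later in this header.
 */
static void dm_power_on_all_edp_panels(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	int edp_num, i;

	dc_get_edp_links(dc, edp_links, &edp_num);
	for (i = 0; i < edp_num; i++)
		dc_link_edp_panel_backlight_power_on(edp_links[i], true);
}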
/* The function initiates detection handshake over the given link. It first
* determines if there are display connections over the link. If so it initiates
@@ -1404,6 +1550,38 @@ uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
*/
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason);
+struct dc_sink_init_data;
+
+/* When the link connection type is dc_connection_mst_branch, remote sinks can
+ * be added to the link. The interface creates a remote sink and associates it
+ * with the current link. The sink is retained by the link until
+ * dc_link_remove_remote_sink is called.
+ *
+ * @dc_link - link the remote sink will be added to.
+ * @edid - byte array of EDID raw data.
+ * @len - size of the edid in bytes
+ * @init_data - initialization data for the new sink.
+ */
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *dc_link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+
+/* Remove remote sink from a link with dc_connection_mst_branch connection type.
+ * @link - link the sink should be removed from
+ * @sink - sink to be removed.
+ */
+void dc_link_remove_remote_sink(
+ struct dc_link *link,
+ struct dc_sink *sink);
+
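/* Sketch only, under the assumption that link->type reflects the current
 * detection result: guard remote-sink creation on the MST connection type as
 * the comment above requires. The helper name is illustrative.
 */
static struct dc_sink *dm_add_mst_remote_sink(struct dc_link *link,
					      const uint8_t *edid, int len,
					      struct dc_sink_init_data *init_data)
{
	if (link->type != dc_connection_mst_branch)
		return NULL;	/* remote sinks only exist behind MST branches */
	return dc_link_add_remote_sink(link, edid, len, init_data);
}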
+/* Enable HPD interrupt handler for a given link */
+void dc_link_enable_hpd(const struct dc_link *link);
+
+/* Disable HPD interrupt handler for a given link */
+void dc_link_disable_hpd(const struct dc_link *link);
+
/* determine if there is a sink connected to the link
*
* @type - dc_connection_single if connected, dc_connection_none otherwise.
@@ -1417,14 +1595,115 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason);
bool dc_link_detect_connection_type(struct dc_link *link,
enum dc_connection_type *type);
+/* query current hpd pin value
+ * return - true if HPD is asserted (HPD high), false otherwise (HPD low)
+ */
+bool dc_link_get_hpd_state(struct dc_link *link);
+
/* Getter for cached link status from given link */
const struct dc_link_status *dc_link_get_status(const struct dc_link *link);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
+/* enable/disable hardware HPD filter.
+ *
+ * @link - The link the HPD pin is associated with.
+ * @enable = true - enable hardware HPD filter. An HPD event will only be
+ * queued to the irq handler once no HPD change has been detected within dc's
+ * default HPD filtering interval since the last HPD event. I.e. if the display
+ * keeps toggling hpd pulses within the default HPD interval, no HPD event will
+ * be received until the HPD toggles have stopped; then the HPD event is queued
+ * to the irq handler once the default HPD filtering interval has elapsed since
+ * the last HPD event.
+ *
+ * @enable = false - disable hardware HPD filter. An HPD event will be queued
+ * immediately to the irq handler once no HPD change has been detected within
+ * the IRQ_HPD (aka HPD short pulse) interval (i.e. 2 ms).
+ */
+void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
+
+/* submit i2c read/write payloads through ddc channel
+ * @link_index - index to a link with ddc in i2c mode
+ * @cmd - i2c command structure
+ * return - true on success, false otherwise.
+ */
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd);
+
+/* submit i2c read/write payloads through oem channel
+ * @cmd - i2c command structure
+ * return - true on success, false otherwise.
+ */
+bool dc_submit_i2c_oem(
+ struct dc *dc,
+ struct i2c_command *cmd);
+
+enum aux_return_code_type;
+/* Attempt to transfer the given aux payload. This function does not perform
+ * retries or handle error states. The reply is returned in the payload->reply
+ * and the result through operation_result. Returns the number of bytes
+ * transferred, or -1 on failure.
+ */
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result);
+
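/* Minimal sketch of a single raw DPCD read. The aux_payload field names used
 * here (i2c_over_aux, write, address, length, data) are assumed from dc's DDC
 * types and should be verified against the real struct; no retries are
 * performed, per the note above.
 */
static int dm_dpcd_read_byte(struct ddc_service *ddc, uint32_t address,
			     uint8_t *value)
{
	enum aux_return_code_type op_result;
	struct aux_payload payload = {
		.i2c_over_aux = false,	/* native AUX transaction */
		.write = false,		/* read */
		.address = address,
		.length = 1,
		.data = value,
	};

	/* returns bytes transferred, or -1 on failure */
	return dc_link_aux_transfer_raw(ddc, &payload, &op_result);
}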
+bool dc_is_oem_i2c_device_present(
+ struct dc *dc,
+ size_t slave_address
+);
+
/* return true if the connected receiver supports the hdcp version */
bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal);
bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal);
-#endif
+
+/* Notify DC about DP RX Interrupt (aka DP IRQ_HPD).
+ *
+ * TODO - When defer_handling is true the function will have a different purpose.
+ * It no longer does complete hpd rx irq handling. We should create a separate
+ * interface specifically for this case.
+ *
+ * Return:
+ * true - Downstream port status changed. DM should call DC to do the
+ * detection.
+ * false - no change in Downstream port status. No further action required
+ * from DM.
+ */
+bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
+ union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
+/* handle the DP spec defined test automation sequence */
+void dc_link_dp_handle_automated_test(struct dc_link *link);
+
+/* handle the DP link loss sequence and try to recover from RX link loss on a
+ * best effort basis
+ */
+void dc_link_dp_handle_link_loss(struct dc_link *link);
+
+/* Determine if hpd rx irq should be handled or ignored
+ * return true - hpd rx irq should be handled.
+ * return false - it is safe to ignore hpd rx irq event
+ */
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
+
+/* Determine if link loss is indicated with a given hpd_irq_dpcd_data.
+ * @link - link the hpd irq data is associated with
+ * @hpd_irq_dpcd_data - input hpd irq data
+ * return - true if the hpd irq data indicates link loss
+ */
+bool dc_link_check_link_loss_status(struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+
+/* Read hpd rx irq data from a given link
+ * @link - link where the hpd irq data should be read from
+ * @irq_data - output hpd irq data
+ * return - DC_OK if the hpd irq data was read successfully, otherwise the
+ * read failed.
+ */
+enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data);
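/* Assumed flow sketch chaining the HPD RX IRQ helpers above; the DM's real
 * handler additionally deals with deferred handling and automated tests.
 */
static void dm_handle_hpd_rx_irq_sketch(struct dc_link *link)
{
	union hpd_irq_data irq_data;

	if (!dc_link_dp_allow_hpd_rx_irq(link))
		return;		/* safe to ignore this event */

	if (dc_link_dp_read_hpd_rx_irq_data(link, &irq_data) != DC_OK)
		return;

	if (dc_link_check_link_loss_status(link, &irq_data))
		dc_link_dp_handle_link_loss(link);
}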
/* The function clears recorded DP RX states in the link. DM should call this
* function when it is resuming from S3 power state to previously connected links.
@@ -1450,12 +1729,6 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting);
-/* The function returns minimum bandwidth required to drive a given timing
- * return - minimum required timing bandwidth in kbps.
- */
-uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing);
-
/* The function takes a snapshot of current link resource allocation state
* @dc: pointer to dc of the dm calling this
* @map: a dc link resource snapshot defined internally to dc.
@@ -1493,6 +1766,269 @@ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
* interface i.e stream_update->dsc_config
*/
bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx);
+
+/* translate raw FRL link rate data to bandwidth in kbps */
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(
+ struct dc *dc, uint8_t bw);
+
+/* determine the optimal link settings for a given link and required bandwidth.
+ * @link - currently detected link
+ * @req_bw - requested bandwidth in kbps
+ * @link_settings - returned optimal link settings that can fit the requested
+ * bandwidth
+ * return - false if the link can't support the requested bandwidth, true if
+ * suitable link settings are found.
+ */
+bool dc_link_decide_edp_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_settings,
+ uint32_t req_bw);
+
+/* return the max dp link settings that can be driven by the link without
+ * considering the connected RX device and its capability
+ */
+bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap);
+
+/* determine which DP link channel coding format will be used when the link is
+ * driving MST mode. The decision remains unchanged until the next HPD event.
+ *
+ * @link - a link with a DP RX connection
+ * return - the channel coding format dc will choose if a stream is committed
+ * to this link with MST signal type.
+ */
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
+ const struct dc_link *link);
+
+/* get the max dp link settings the link can enable with all things considered
+ * (i.e. TX/RX/cable capabilities and dp override policies).
+ *
+ * @link - a link with a DP RX connection
+ * return - max dp link settings the link can enable.
+ */
+const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link);
+
+/* Check if an RX (e.g. DP sink, MST hub, passive or active dongle) is connected
+ * to a link with dp connector signal type.
+ * @link - a link with dp connector signal type
+ * return - true if connected, false otherwise
+ */
+bool dc_link_is_dp_sink_present(struct dc_link *link);
+
+/* Force a DP lane settings update on the main-link video signal and notify the
+ * change to DP RX via DPCD. This is a debug interface used for video signal
+ * integrity tuning purposes. The interface assumes the link has already been
+ * enabled with a DP signal.
+ *
+ * @lt_settings - a container structure with desired hw_lane_settings
+ */
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ struct dc_link *link);
+
+/* Enable a test pattern in the Link or PHY layer of an active link for
+ * compliance testing or debugging purposes. The test pattern will remain until
+ * the next unplug.
+ *
+ * @link - active link with DP signal output enabled.
+ * @test_pattern - desired test pattern to output.
+ * NOTE: set to DP_TEST_PATTERN_VIDEO_MODE to disable the previous test pattern.
+ * @test_pattern_color_space - for a video test pattern choose a desired color
+ * space.
+ * @p_link_settings - for a PHY pattern choose the desired link settings.
+ * @p_custom_pattern - some test patterns require a custom input to customize
+ * some pattern details. Otherwise keep it NULL.
+ * @cust_pattern_size - size of the custom pattern input.
+ */
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+
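/* Sketch: restore normal video output after compliance testing, per the NOTE
 * above. DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED is assumed to be the neutral
 * color-space value in dc's enum; the helper name is illustrative.
 */
static void dm_clear_test_pattern(struct dc_link *link)
{
	dc_link_dp_set_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE,
				    DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
				    NULL, NULL, 0);
}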
+/* Force a specific link's DP link settings to always use a given value until
+ * reboot. If the link has already been enabled, the interface will also switch
+ * to the desired link settings immediately. This is a debug interface for
+ * generic dp issue troubleshooting.
+ */
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+
+/* Force a DP link to customize specific link training behavior by overriding
+ * the standard DP spec defined protocol. This is a debug interface to
+ * troubleshoot display specific link training issues or to apply display
+ * specific workarounds in link training.
+ *
+ * @link_settings - if not NULL, force preferred link settings on the link.
+ * @lt_override - a set of override pointers. If any pointer is non-NULL, dc
+ * will apply this particular override in future link training. If NULL is
+ * passed in, dc resets previous overrides.
+ * NOTE: DM must keep the memory backing the override pointers valid until DM
+ * resets the preferred training settings.
+ */
+void dc_link_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain);
+
+/* return - true if FEC is supported with connected DP RX, false otherwise */
+bool dc_link_is_fec_supported(const struct dc_link *link);
+
+/* query FEC enablement policy to determine if FEC will be enabled by dc during
+ * link enablement.
+ * return - true if FEC should be enabled, false otherwise.
+ */
+bool dc_link_should_enable_fec(const struct dc_link *link);
+
+/* determine the lttpr mode the current link should be enabled with for
+ * specific link settings.
+ */
+enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting);
+
+/* Force DP RX to update its power state.
+ * NOTE: this interface doesn't update the dp main-link. Calling this function
+ * will leave the DP TX main-link and DP RX power states out of sync. DM has to
+ * restore the RX power state once it finishes the DM specific operation that
+ * required DP RX to be in a specific power state.
+ * @on - true to set DP RX in the D0 power state, false to set DP RX in the D3
+ * power state.
+ */
+void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+/* Force the link to read base dp receiver caps from dpcd 000h - 00Fh and
+ * overwrite the current values read from the extended receiver caps at
+ * 02200h - 0220Fh. Some DP RX have problems providing accurate DP receiver
+ * caps in the extended field; this interface is a workaround to revert the
+ * link back to using the base caps.
+ */
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link);
+
+void dc_link_edp_panel_backlight_power_on(struct dc_link *link,
+ bool wait_for_hpd);
+
+/* Set backlight level of an embedded panel (eDP, LVDS).
+ * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
+ * and 16 bit fractional, where 1.0 is max backlight value.
+ */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+
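/* Worked sketch of the 16.16 fixed-point format described above: scale an
 * 8-bit brightness so that 255 maps to 0x00010000 (1.0, max backlight). The
 * helper name and the frame_ramp value of 0 (no ramp) are assumptions.
 */
static bool dm_set_backlight_from_u8(const struct dc_link *link, uint8_t level)
{
	uint32_t backlight_pwm_u16_16 = ((uint32_t)level << 16) / 255;

	return dc_link_set_backlight_level(link, backlight_pwm_u16_16, 0);
}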
+/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits,
+ uint32_t *backlight_millinits_peak);
+
+int dc_link_get_backlight_level(const struct dc_link *dc_link);
+
+int dc_link_get_target_backlight_pwm(const struct dc_link *link);
+
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
+
+bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state);
+
+bool dc_link_setup_psr(struct dc_link *dc_link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context);
+
+/* On eDP links this function call will stall until T12 has elapsed.
+ * If the panel is not in power off state, this function will return
+ * immediately.
+ */
+bool dc_link_wait_for_t12(struct dc_link *link);
+
+/* Determine if dp trace has been initialized to reflect an up to date result.
+ * return - true if trace is initialized and has valid data, false if dp trace
+ * doesn't have a valid result.
+ */
+bool dc_dp_trace_is_initialized(struct dc_link *link);
+
+/* Query a dp trace flag to indicate if the current dp trace data has been
+ * logged before
+ */
+bool dc_dp_trace_is_logged(struct dc_link *link,
+ bool in_detection);
+
+/* Set dp trace flag to indicate whether DM has already logged the current dp
+ * trace data. DM can set is_logged to true upon logging and check
+ * dc_dp_trace_is_logged before logging to avoid logging the same result twice.
+ */
+void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+ bool in_detection,
+ bool is_logged);
+
+/* Obtain the driver time stamp of the last dp link training end. The time
+ * stamp is formatted by the dm_get_timestamp DM function.
+ * @in_detection - true to get link training end time stamp of last link
+ * training in detection sequence. false to get link training end time stamp
+ * of last link training in commit (dpms) sequence
+ */
+unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+ bool in_detection);
+
+/* Get how many link training attempts dc has done with the latest sequence.
+ * @in_detection - true to get link training count of last link
+ * training in detection sequence. false to get link training count of last link
+ * training in commit (dpms) sequence
+ */
+const struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+ bool in_detection);
+
+/* Get how many link losses have happened since the last link training attempt */
+unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
+
+/*
+ * USB4 DPIA BW ALLOCATION PUBLIC FUNCTIONS
+ */
+/*
+ * Send a request from DP-Tx requesting to allocate BW remotely after
+ * allocating it locally. This will get processed by CM and a CB function
+ * will be called.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @req_bw: The requested bw in Kbytes to be allocated
+ *
+ * return: none
+ */
+void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);
+
+/*
+ * Handler function for when the status of the request above is complete.
+ * We will find out the result of the allocation on the CM side and update the
+ * structs accordingly.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @bw: Allocated or Estimated BW depending on the result
+ * @result: Response type
+ *
+ * return: none
+ */
+void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link,
+ uint8_t bw, uint8_t result);
+
+/*
+ * Handle the USB4 BW Allocation related functionality here:
+ * Plug => Try to allocate max bw from timing parameters supported by the sink
+ * Unplug => de-allocate bw
+ *
+ * @link: pointer to the dc_link struct instance
+ * @peak_bw: Peak bw used by the link/sink
+ *
+ * return: allocated bw else return 0
+ */
+int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+ struct dc_link *link, int peak_bw);
+
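/* Hypothetical plug-path usage of the DPIA BW allocation entry point: request
 * the sink's peak BW and fall back when nothing could be allocated. The
 * peak_bw_kbps source and the fallback policy are assumptions.
 */
static void dm_dpia_plug_sketch(struct dc_link *link, int peak_bw_kbps)
{
	int allocated_kbps =
		dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
			link, peak_bw_kbps);

	if (allocated_kbps == 0) {
		/* allocation failed or BW alloc mode is not enabled;
		 * validate modes against the unmanaged link instead
		 */
	}
}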
/* Sink Interfaces - A sink corresponds to a display output device */
struct dc_container_id {
@@ -1511,7 +2047,7 @@ struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
// 'true' if MST topology supports DSC passthrough for sink
// 'false' if MST topology does not support DSC passthrough
bool is_dsc_passthrough_supported;
@@ -1603,7 +2139,6 @@ void dc_resume(struct dc *dc);
void dc_power_down_on_boot(struct dc *dc);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
/*
* HDCP Interfaces
*/
@@ -1611,7 +2146,6 @@ enum hdcp_message_status dc_process_hdcp_msg(
enum signal_type signal,
struct dc_link *link,
struct hdcp_protection_message *message_info);
-#endif
bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index c2092775ca88..b5c6501c28fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -421,7 +421,6 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
}
}
-#ifdef CONFIG_DRM_AMD_DC_DCN
/**
* populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
*
@@ -638,7 +637,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
pipe_data->pipe_config.subvp_data.main_vblank_end =
main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
- pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param;
/* Calculate the scaling factor from the src and dst height.
@@ -680,11 +679,11 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
- pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
if (phantom_pipe->bottom_pipe) {
- pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
} else if (phantom_pipe->next_odm_pipe) {
- pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->pipe_idx;
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
} else {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
}
@@ -750,7 +749,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
!pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
- } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE &&
+ !pipe->top_pipe && !pipe->prev_odm_pipe) {
// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
// we run through DML without calculating "natural" P-state support
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
@@ -775,7 +775,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
-#endif
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 809a1851f196..af53278662ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -921,12 +921,6 @@ struct dpcd_usb4_dp_tunneling_info {
#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
#endif
-#ifndef DP_LINK_SQUARE_PATTERN
-#define DP_LINK_SQUARE_PATTERN 0x10F
-#endif
-#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX
-#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110
-#endif
#ifndef DP_DSC_CONFIGURATION
#define DP_DSC_CONFIGURATION 0x161
#endif
@@ -939,12 +933,6 @@ struct dpcd_usb4_dp_tunneling_info {
#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL
#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216
#endif
-#ifndef DP_LINK_SQUARE_PATTERN
-#define DP_LINK_SQUARE_PATTERN 0x10F
-#endif
-#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX
-#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217
-#endif
#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0
#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230
#endif
@@ -988,10 +976,6 @@ struct dpcd_usb4_dp_tunneling_info {
#define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3)
/* TODO - Use DRM header to replace above once available */
#endif // DP_INTRA_HOP_AUX_REPLY_INDICATION
-
-#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
-#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
-#endif
union dp_main_line_channel_coding_cap {
struct {
uint8_t DP_8b_10b_SUPPORTED :1;
@@ -1261,4 +1245,161 @@ union dpcd_sink_ext_caps {
} bits;
uint8_t raw;
};
+
+enum dc_link_fec_state {
+ dc_link_fec_not_ready,
+ dc_link_fec_ready,
+ dc_link_fec_enabled
+};
+
+union dpcd_psr_configuration {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
+ unsigned char CRC_VERIFICATION : 1;
+ unsigned char FRAME_CAPTURE_INDICATION : 1;
+ /* For eDP 1.4, PSR v2*/
+ unsigned char LINE_CAPTURE_INDICATION : 1;
+ /* For eDP 1.4, PSR v2*/
+ unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
+ unsigned char ENABLE_PSR2 : 1;
+ unsigned char EARLY_TRANSPORT_ENABLE : 1;
+ } bits;
+ unsigned char raw;
+};
+
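/* Sketch of how a bitfield union like this is typically programmed: set the
 * desired bits, then write the raw byte to the sink. DP_PSR_EN_CFG (0x170)
 * from drm_dp.h is assumed to be the register this union mirrors.
 */
static void dm_build_psr_cfg_sketch(void)
{
	union dpcd_psr_configuration psr_cfg = { .raw = 0 };

	psr_cfg.bits.ENABLE = 1;
	psr_cfg.bits.CRC_VERIFICATION = 1;
	/* psr_cfg.raw would then be written to DP_PSR_EN_CFG over AUX */
}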
+union dpcd_alpm_configuration {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char IRQ_HPD_ENABLE : 1;
+ unsigned char RESERVED : 6;
+ } bits;
+ unsigned char raw;
+};
+
+union dpcd_sink_active_vtotal_control_mode {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char RESERVED : 7;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_error_status {
+ struct {
+ unsigned char LINK_CRC_ERROR :1;
+ unsigned char RFB_STORAGE_ERROR :1;
+ unsigned char VSC_SDP_ERROR :1;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_sink_psr_status {
+ struct {
+ unsigned char SINK_SELF_REFRESH_STATUS :3;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+struct edp_trace_power_timestamps {
+ uint64_t poweroff;
+ uint64_t poweron;
+};
+
+struct dp_trace_lt_counts {
+ unsigned int total;
+ unsigned int fail;
+};
+
+enum link_training_result {
+ LINK_TRAINING_SUCCESS,
+ LINK_TRAINING_CR_FAIL_LANE0,
+ LINK_TRAINING_CR_FAIL_LANE1,
+ LINK_TRAINING_CR_FAIL_LANE23,
+ /* CR DONE bit is cleared during EQ step */
+ LINK_TRAINING_EQ_FAIL_CR,
+ /* CR DONE bit is cleared but LANE0_CR_DONE is set during EQ step */
+ LINK_TRAINING_EQ_FAIL_CR_PARTIAL,
+ /* other failure during EQ step */
+ LINK_TRAINING_EQ_FAIL_EQ,
+ LINK_TRAINING_LQA_FAIL,
+ /* one of the CR,EQ or symbol lock is dropped */
+ LINK_TRAINING_LINK_LOSS,
+ /* Abort link training (because sink unplugged) */
+ LINK_TRAINING_ABORT,
+ DP_128b_132b_LT_FAILED,
+ DP_128b_132b_MAX_LOOP_COUNT_REACHED,
+ DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT,
+ DP_128b_132b_CDS_DONE_TIMEOUT,
+};
+
+struct dp_trace_lt {
+ struct dp_trace_lt_counts counts;
+ struct dp_trace_timestamps {
+ unsigned long long start;
+ unsigned long long end;
+ } timestamps;
+ enum link_training_result result;
+ bool is_logged;
+};
+
+struct dp_trace {
+ struct dp_trace_lt detect_lt_trace;
+ struct dp_trace_lt commit_lt_trace;
+ unsigned int link_loss_count;
+ bool is_initialized;
+ struct edp_trace_power_timestamps edp_trace_power_timestamps;
+};
+
+/* TODO - This is a temporary location for any new DPCD definitions.
+ * We should move these to the drm_dp header.
+ */
+#ifndef DP_LINK_SQUARE_PATTERN
+#define DP_LINK_SQUARE_PATTERN 0x10F
+#endif
+#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX
+#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217
+#endif
+#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX
+#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110
+#endif
+#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
+#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
+#endif
+#ifndef DP_TUNNELING_IRQ
+#define DP_TUNNELING_IRQ (1 << 5)
+#endif
+/* USB4 DPCD BW Allocation Registers, Chapter 10.7 */
+#ifndef DP_TUNNELING_CAPABILITIES
+#define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */
+#endif
+#ifndef USB4_DRIVER_ID
+#define USB4_DRIVER_ID 0xE000F /* 1.4a */
+#endif
+#ifndef USB4_DRIVER_BW_CAPABILITY
+#define USB4_DRIVER_BW_CAPABILITY 0xE0020 /* 1.4a */
+#endif
+#ifndef DP_IN_ADAPTER_TUNNEL_INFO
+#define DP_IN_ADAPTER_TUNNEL_INFO 0xE0021 /* 1.4a */
+#endif
+#ifndef DP_BW_GRANULALITY
+#define DP_BW_GRANULALITY 0xE0022 /* 1.4a */
+#endif
+#ifndef ESTIMATED_BW
+#define ESTIMATED_BW 0xE0023 /* 1.4a */
+#endif
+#ifndef ALLOCATED_BW
+#define ALLOCATED_BW 0xE0024 /* 1.4a */
+#endif
+#ifndef DP_TUNNELING_STATUS
+#define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */
+#endif
+#ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
+#define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */
+#endif
+#ifndef REQUESTED_BW
+#define REQUESTED_BW 0xE0031 /* 1.4a */
+#endif
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 684713b2cff7..0e92a322c2ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -54,6 +54,12 @@ struct dc_dsc_policy {
bool enable_dsc_when_not_needed;
};
+struct dc_dsc_config_options {
+ uint32_t dsc_min_slice_height_override;
+ uint32_t max_target_bpp_limit_override_x16;
+ uint32_t slice_height_granularity;
+};
+
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
@@ -71,8 +77,7 @@ bool dc_dsc_compute_bandwidth_range(
bool dc_dsc_compute_config(
const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
- uint32_t dsc_min_slice_height_override,
- uint32_t max_target_bpp_limit_override,
+ const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
@@ -100,4 +105,6 @@ void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable);
void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable);
+void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options);
+
#endif
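/* Assumed call pattern for the reworked DSC API above: fetch defaults into the
 * new dc_dsc_config_options struct, tweak an override, then pass the struct to
 * dc_dsc_compute_config() in place of the two former override parameters. The
 * wrapper name and the example override value are illustrative.
 */
static bool dm_compute_dsc_config_sketch(
		const struct dc *dc,
		const struct display_stream_compressor *dsc,
		const struct dsc_dec_dpcd_caps *dsc_sink_caps,
		uint32_t target_bandwidth_kbps,
		const struct dc_crtc_timing *timing,
		struct dc_dsc_config *dsc_cfg)
{
	struct dc_dsc_config_options opts;

	dc_dsc_get_default_config_option(dc, &opts);
	/* example override: cap target bpp at 16.0 (the field is bpp * 16) */
	opts.max_target_bpp_limit_override_x16 = 16 * 16;

	return dc_dsc_compute_config(dsc, dsc_sink_caps, &opts,
				     target_bandwidth_kbps, timing, dsc_cfg);
}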
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index cc3d6fb39364..100d62162b71 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -829,7 +829,7 @@ struct dc_dsc_config {
uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */
bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */
#endif
bool is_dp; /* indicate if DSC is applied based on DP's capability */
@@ -1085,5 +1085,19 @@ struct tg_color {
uint16_t color_b_cb;
};
+enum symclk_state {
+ SYMCLK_OFF_TX_OFF,
+ SYMCLK_ON_TX_ON,
+ SYMCLK_ON_TX_OFF,
+};
+
+struct phy_state {
+ struct {
+ uint8_t otg : 1;
+ uint8_t reserved : 7;
+ } symclk_ref_cnts;
+ enum symclk_state symclk_state;
+};
+
#endif /* DC_HW_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
deleted file mode 100644
index cecd807f5ed8..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Copyright 2012-14 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef DC_LINK_H_
-#define DC_LINK_H_
-
-#include "dc.h"
-#include "dc_types.h"
-#include "grph_object_defs.h"
-
-struct link_resource;
-enum aux_return_code_type;
-
-enum dc_link_fec_state {
- dc_link_fec_not_ready,
- dc_link_fec_ready,
- dc_link_fec_enabled
-};
-
-/* DP MST stream allocation (payload bandwidth number) */
-struct link_mst_stream_allocation {
- /* DIG front */
- const struct stream_encoder *stream_enc;
- /* HPO DP Stream Encoder */
- const struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
- /* associate DRM payload table with DC stream encoder */
- uint8_t vcp_id;
- /* number of slots required for the DP stream in transport packet */
- uint8_t slot_count;
-};
-
-/* DP MST stream allocation table */
-struct link_mst_stream_allocation_table {
- /* number of DP video streams */
- int stream_count;
- /* array of stream allocations */
- struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
-};
-
-struct edp_trace_power_timestamps {
- uint64_t poweroff;
- uint64_t poweron;
-};
-
-struct dp_trace_lt_counts {
- unsigned int total;
- unsigned int fail;
-};
-
-struct dp_trace_lt {
- struct dp_trace_lt_counts counts;
- struct dp_trace_timestamps {
- unsigned long long start;
- unsigned long long end;
- } timestamps;
- enum link_training_result result;
- bool is_logged;
-};
-
-struct dp_trace {
- struct dp_trace_lt detect_lt_trace;
- struct dp_trace_lt commit_lt_trace;
- unsigned int link_loss_count;
- bool is_initialized;
- struct edp_trace_power_timestamps edp_trace_power_timestamps;
-};
-
-/* PSR feature flags */
-struct psr_settings {
- bool psr_feature_enabled; // PSR is supported by sink
- bool psr_allow_active; // PSR is currently active
- enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD
- bool psr_vtotal_control_support; // Vtotal control is supported by sink
- unsigned long long psr_dirty_rects_change_timestamp_ns; // for delay of enabling PSR-SU
-
- /* These parameters are calculated in Driver,
- * based on display timing and Sink capabilities.
- * If VBLANK region is too small and Sink takes a long time
- * to set up RFB, it may take an extra frame to enter PSR state.
- */
- bool psr_frame_capture_indication_req;
- unsigned int psr_sdp_transmit_line_num_deadline;
- uint8_t force_ffu_mode;
- unsigned int psr_power_opt;
-};
-
-/* To split out "global" and "per-panel" config settings.
- * Add a struct dc_panel_config under dc_link
- */
-struct dc_panel_config {
- /* extra panel power sequence parameters */
- struct pps {
- unsigned int extra_t3_ms;
- unsigned int extra_t7_ms;
- unsigned int extra_delay_backlight_off;
- unsigned int extra_post_t7_ms;
- unsigned int extra_pre_t11_ms;
- unsigned int extra_t12_ms;
- unsigned int extra_post_OUI_ms;
- } pps;
- /* PSR */
- struct psr {
- bool disable_psr;
- bool disallow_psrsu;
- bool rc_disable;
- bool rc_allow_static_screen;
- bool rc_allow_fullscreen_VPB;
- } psr;
- /* ABM */
- struct varib {
- unsigned int varibright_feature_enable;
- unsigned int def_varibright_level;
- unsigned int abm_config_setting;
- } varib;
- /* edp DSC */
- struct dsc {
- bool disable_dsc_edp;
- unsigned int force_dsc_edp_policy;
- } dsc;
- /* eDP ILR */
- struct ilr {
- bool optimize_edp_link_rate; /* eDP ILR */
- } ilr;
-};
-
-/*
- * USB4 DPIA BW ALLOCATION STRUCTS
- */
-struct dc_dpia_bw_alloc {
- int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already
- int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
- int sink_max_bw; // The Max BW that sink can require/support
- int estimated_bw; // The estimated available BW for this DPIA
- int bw_granularity; // BW Granularity
- bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
- bool response_ready; // Response ready from the CM side
-};
-
-#define MAX_SINKS_PER_LINK 4
-
-/*
- * A link contains one or more sinks and their connected status.
- * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
- */
-struct dc_link {
- struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
- unsigned int sink_count;
- struct dc_sink *local_sink;
- unsigned int link_index;
- enum dc_connection_type type;
- enum signal_type connector_signal;
- enum dc_irq_source irq_source_hpd;
- enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
- bool is_hpd_filter_disabled;
- bool dp_ss_off;
-
- /**
- * @link_state_valid:
- *
- * If there is no link and local sink, this variable should be set to
- * false. Otherwise, it should be set to true; usually, the function
- * core_link_enable_stream sets this field to true.
- */
- bool link_state_valid;
- bool aux_access_disabled;
- bool sync_lt_in_progress;
- bool is_internal_display;
-
- /* TODO: Rename. Flag an endpoint as having a programmable mapping to a
- * DIG encoder. */
- bool is_dig_mapping_flexible;
- bool hpd_status; /* HPD status of link without physical HPD pin. */
- bool is_hpd_pending; /* Indicates a new received hpd */
- bool is_automated; /* Indicates automated testing */
-
- bool edp_sink_present;
-
- struct dp_trace dp_trace;
-
- /* caps is the same as reported_link_cap. link_traing use
- * reported_link_cap. Will clean up. TODO
- */
- struct dc_link_settings reported_link_cap;
- struct dc_link_settings verified_link_cap;
- struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
- struct dc_link_settings preferred_link_setting;
- /* preferred_training_settings are override values that
- * come from DM. DM is responsible for the memory
- * management of the override pointers.
- */
- struct dc_link_training_overrides preferred_training_settings;
- struct dp_audio_test_data audio_test_data;
-
- uint8_t ddc_hw_inst;
-
- uint8_t hpd_src;
-
- uint8_t link_enc_hw_inst;
- /* DIG link encoder ID. Used as index in link encoder resource pool.
- * For links with fixed mapping to DIG, this is not changed after dc_link
- * object creation.
- */
- enum engine_id eng_id;
-
- bool test_pattern_enabled;
- union compliance_test_state compliance_test_state;
-
- void *priv;
-
- struct ddc_service *ddc;
-
- bool aux_mode;
-
- /* Private to DC core */
-
- const struct dc *dc;
-
- struct dc_context *ctx;
-
- struct panel_cntl *panel_cntl;
- struct link_encoder *link_enc;
- struct graphics_object_id link_id;
- /* Endpoint type distinguishes display endpoints which do not have entries
- * in the BIOS connector table from those that do. Helps when tracking link
- * encoder to display endpoint assignments.
- */
- enum display_endpoint_type ep_type;
- union ddi_channel_mapping ddi_channel_mapping;
- struct connector_device_tag_info device_tag;
- struct dpcd_caps dpcd_caps;
- uint32_t dongle_max_pix_clk;
- unsigned short chip_caps;
- unsigned int dpcd_sink_count;
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- struct hdcp_caps hdcp_caps;
-#endif
- enum edp_revision edp_revision;
- union dpcd_sink_ext_caps dpcd_sink_ext_caps;
-
- struct psr_settings psr_settings;
-
- /* Drive settings read from integrated info table */
- struct dc_lane_settings bios_forced_drive_settings;
-
- /* Vendor specific LTTPR workaround variables */
- uint8_t vendor_specific_lttpr_link_rate_wa;
- bool apply_vendor_specific_lttpr_link_rate_wa;
-
- /* MST record stream using this link */
- struct link_flags {
- bool dp_keep_receiver_powered;
- bool dp_skip_DID2;
- bool dp_skip_reset_segment;
- bool dp_skip_fs_144hz;
- bool dp_mot_reset_segment;
- /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
- bool dpia_mst_dsc_always_on;
- /* Forced DPIA into TBT3 compatibility mode. */
- bool dpia_forced_tbt3_mode;
- bool dongle_mode_timing_override;
- } wa_flags;
- struct link_mst_stream_allocation_table mst_stream_alloc_table;
-
- struct dc_link_status link_status;
- struct dprx_states dprx_states;
-
- struct gpio *hpd_gpio;
- enum dc_link_fec_state fec_state;
- bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
-
- struct dc_panel_config panel_config;
- struct phy_state phy_state;
-};
-
-
-/**
- * dc_get_link_at_index() - Return an enumerated dc_link.
- *
- * dc_link order is constant and determined at
- * boot time. They cannot be created or destroyed.
- * Use dc_get_caps() to get number of links.
- */
-static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
-{
- return dc->links[link_index];
-}
-
-static inline void get_edp_links(const struct dc *dc,
- struct dc_link **edp_links,
- int *edp_num)
-{
- int i;
-
- *edp_num = 0;
- for (i = 0; i < dc->link_count; i++) {
- // report any eDP links, even unconnected DDI's
- if (!dc->links[i])
- continue;
- if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
- edp_links[*edp_num] = dc->links[i];
- if (++(*edp_num) == MAX_NUM_EDP)
- return;
- }
- }
-}
-
-static inline bool dc_get_edp_link_panel_inst(const struct dc *dc,
- const struct dc_link *link,
- unsigned int *inst_out)
-{
- struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num, i;
-
- *inst_out = 0;
- if (link->connector_signal != SIGNAL_TYPE_EDP)
- return false;
- get_edp_links(dc, edp_links, &edp_num);
- for (i = 0; i < edp_num; i++) {
- if (link == edp_links[i])
- break;
- (*inst_out)++;
- }
- return true;
-}
-
-/* Set backlight level of an embedded panel (eDP, LVDS).
- * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
- * and 16 bit fractional, where 1.0 is max backlight value.
- */
-bool dc_link_set_backlight_level(const struct dc_link *dc_link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
-
-/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
-bool dc_link_set_backlight_level_nits(struct dc_link *link,
- bool isHDR,
- uint32_t backlight_millinits,
- uint32_t transition_time_in_ms);
-
-bool dc_link_get_backlight_level_nits(struct dc_link *link,
- uint32_t *backlight_millinits,
- uint32_t *backlight_millinits_peak);
-
-int dc_link_get_backlight_level(const struct dc_link *dc_link);
-
-int dc_link_get_target_backlight_pwm(const struct dc_link *link);
-
-bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable,
- bool wait, bool force_static, const unsigned int *power_opts);
-
-bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state);
-
-bool dc_link_setup_psr(struct dc_link *dc_link,
- const struct dc_stream_state *stream, struct psr_config *psr_config,
- struct psr_context *psr_context);
-
-bool dc_link_get_hpd_state(struct dc_link *dc_link);
-
-/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
- * Return:
- * true - Downstream port status changed. DM should call DC to do the
- * detection.
- * false - no change in Downstream port status. No further action required
- * from DM. */
-bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
- union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
- bool defer_handling, bool *has_left_work);
-
-/*
- * On eDP links this function call will stall until T12 has elapsed.
- * If the panel is not in power off state, this function will return
- * immediately.
- */
-bool dc_link_wait_for_t12(struct dc_link *link);
-
-void dc_link_dp_handle_automated_test(struct dc_link *link);
-void dc_link_dp_handle_link_loss(struct dc_link *link);
-bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
-bool dc_link_check_link_loss_status(struct dc_link *link,
- union hpd_irq_data *hpd_irq_dpcd_data);
-enum dc_status dc_link_dp_read_hpd_rx_irq_data(
- struct dc_link *link,
- union hpd_irq_data *irq_data);
-struct dc_sink_init_data;
-
-struct dc_sink *dc_link_add_remote_sink(
- struct dc_link *dc_link,
- const uint8_t *edid,
- int len,
- struct dc_sink_init_data *init_data);
-
-void dc_link_remove_remote_sink(
- struct dc_link *link,
- struct dc_sink *sink);
-
-/* Used by diagnostics for virtual link at the moment */
-
-bool dc_link_dp_set_test_pattern(
- struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size);
-
-bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap);
-
-/**
- *****************************************************************************
- * Function: dc_link_enable_hpd_filter
- *
- * @brief
- * If enable is true, programs HPD filter on associated HPD line to default
- * values dependent on link->connector_signal
- *
- * If enable is false, programs HPD filter on associated HPD line with no
- * delays on connect or disconnect
- *
- * @param [in] link: pointer to the dc link
- * @param [in] enable: boolean specifying whether to enable hbd
- *****************************************************************************
- */
-void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
-
-bool dc_link_is_dp_sink_present(struct dc_link *link);
-/*
- * DPCD access interfaces
- */
-
-void dc_link_set_drive_settings(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link);
-void dc_link_set_preferred_link_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link);
-void dc_link_set_preferred_training_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link_training_overrides *lt_overrides,
- struct dc_link *link,
- bool skip_immediate_retrain);
-void dc_link_enable_hpd(const struct dc_link *link);
-void dc_link_disable_hpd(const struct dc_link *link);
-void dc_link_set_test_pattern(struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size);
-
-const struct dc_link_settings *dc_link_get_link_cap(
- const struct dc_link *link);
-
-void dc_link_overwrite_extended_receiver_cap(
- struct dc_link *link);
-
-bool dc_is_oem_i2c_device_present(
- struct dc *dc,
- size_t slave_address
-);
-
-bool dc_submit_i2c(
- struct dc *dc,
- uint32_t link_index,
- struct i2c_command *cmd);
-
-bool dc_submit_i2c_oem(
- struct dc *dc,
- struct i2c_command *cmd);
-
-bool dc_link_is_fec_supported(const struct dc_link *link);
-bool dc_link_should_enable_fec(const struct dc_link *link);
-
-uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
-enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
-
-/* take a snapshot of current link resource allocation state */
-void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
-/* restore link resource allocation state from a snapshot */
-void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
-void dp_trace_reset(struct dc_link *link);
-bool dc_dp_trace_is_initialized(struct dc_link *link);
-unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
- bool in_detection);
-void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
- bool in_detection,
- bool is_logged);
-bool dc_dp_trace_is_logged(struct dc_link *link,
- bool in_detection);
-struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
- bool in_detection);
-unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
-
-/* Attempt to transfer the given aux payload. This function does not perform
- * retries or handle error states. The reply is returned in the payload->reply
- * and the result through operation_result. Returns the number of bytes
- * transferred,or -1 on a failure.
- */
-int dc_link_aux_transfer_raw(struct ddc_service *ddc,
- struct aux_payload *payload,
- enum aux_return_code_type *operation_result);
-
-enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
- struct dc_link_settings *link_setting);
-void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
-bool dc_link_decide_edp_link_settings(struct dc_link *link,
- struct dc_link_settings *link_setting,
- uint32_t req_bw);
-void dc_link_edp_panel_backlight_power_on(struct dc_link *link,
- bool wait_for_hpd);
-
-/*
- * USB4 DPIA BW ALLOCATION PUBLIC FUNCTIONS
- */
-/*
- * Send a request from DP-Tx requesting to allocate BW remotely after
- * allocating it locally. This will get processed by CM and a CB function
- * will be called.
- *
- * @link: pointer to the dc_link struct instance
- * @req_bw: The requested bw in Kbyte to allocated
- *
- * return: none
- */
-void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);
-
-/*
- * CB function for when the status of the Req above is complete. We will
- * find out the result of allocating on CM and update structs accordingly
- *
- * @link: pointer to the dc_link struct instance
- * @bw: Allocated or Estimated BW depending on the result
- * @result: Response type
- *
- * return: none
- */
-void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result);
-
-/*
- * Handle the USB4 BW Allocation related functionality here:
- * Plug => Try to allocate max bw from timing parameters supported by the sink
- * Unplug => de-allocate bw
- *
- * @link: pointer to the dc_link struct instance
- * @peak_bw: Peak bw used by the link/sink
- *
- * return: allocated bw else return 0
- */
-int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
-
-/* TODO: this is not meant to be exposed to DM. Should switch to stream update
- * interface i.e stream_update->dsc_config
- */
-bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx);
-#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 27d0242d6cbd..45ab48fe5d00 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -38,10 +38,9 @@
#include "dc_hw_types.h"
#include "dal_types.h"
#include "grph_object_defs.h"
+#include "grph_object_ctrl_defs.h"
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "dm_cp_psp.h"
-#endif
/* forward declarations */
struct dc_plane_state;
@@ -812,9 +811,7 @@ struct dc_context {
uint32_t dc_edp_id_count;
uint64_t fbc_gpu_addr;
struct dc_dmub_srv *dmub_srv;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct cp_psp cp_psp;
-#endif
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
};
@@ -954,7 +951,6 @@ struct dc_link_status {
struct dpcd_caps *dpcd_caps;
};
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
union hdcp_rx_caps {
struct {
uint8_t version;
@@ -981,5 +977,114 @@ struct hdcp_caps {
union hdcp_rx_caps rx_caps;
union hdcp_bcaps bcaps;
};
-#endif
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct link_mst_stream_allocation {
+ /* DIG front */
+ const struct stream_encoder *stream_enc;
+ /* HPO DP Stream Encoder */
+ const struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
+ /* associate DRM payload table with DC stream encoder */
+ uint8_t vcp_id;
+ /* number of slots required for the DP stream in transport packet */
+ uint8_t slot_count;
+};
+
+#define MAX_CONTROLLER_NUM 6
+
+/* DP MST stream allocation table */
+struct link_mst_stream_allocation_table {
+ /* number of DP video streams */
+ int stream_count;
+ /* array of stream allocations */
+ struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+/* PSR feature flags */
+struct psr_settings {
+ bool psr_feature_enabled; // PSR is supported by sink
+ bool psr_allow_active; // PSR is currently active
+ enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD
+ bool psr_vtotal_control_support; // Vtotal control is supported by sink
+ unsigned long long psr_dirty_rects_change_timestamp_ns; // for delay of enabling PSR-SU
+
+ /* These parameters are calculated in Driver,
+ * based on display timing and Sink capabilities.
+ * If VBLANK region is too small and Sink takes a long time
+ * to set up RFB, it may take an extra frame to enter PSR state.
+ */
+ bool psr_frame_capture_indication_req;
+ unsigned int psr_sdp_transmit_line_num_deadline;
+ uint8_t force_ffu_mode;
+ unsigned int psr_power_opt;
+};
+
+/* To split out "global" and "per-panel" config settings.
+ * Add a struct dc_panel_config under dc_link
+ */
+struct dc_panel_config {
+ /* extra panel power sequence parameters */
+ struct pps {
+ unsigned int extra_t3_ms;
+ unsigned int extra_t7_ms;
+ unsigned int extra_delay_backlight_off;
+ unsigned int extra_post_t7_ms;
+ unsigned int extra_pre_t11_ms;
+ unsigned int extra_t12_ms;
+ unsigned int extra_post_OUI_ms;
+ } pps;
+ /* nit brightness */
+ struct nits_brightness {
+ unsigned int peak; /* nits */
+ unsigned int max_avg; /* nits */
+ unsigned int min; /* 1/10000 nits */
+ unsigned int max_nonboost_brightness_millinits;
+ unsigned int min_brightness_millinits;
+ } nits_brightness;
+ /* PSR */
+ struct psr {
+ bool disable_psr;
+ bool disallow_psrsu;
+ bool rc_disable;
+ bool rc_allow_static_screen;
+ bool rc_allow_fullscreen_VPB;
+ } psr;
+ /* ABM */
+ struct varib {
+ unsigned int varibright_feature_enable;
+ unsigned int def_varibright_level;
+ unsigned int abm_config_setting;
+ } varib;
+ /* edp DSC */
+ struct dsc {
+ bool disable_dsc_edp;
+ unsigned int force_dsc_edp_policy;
+ } dsc;
+ /* eDP ILR */
+ struct ilr {
+ bool optimize_edp_link_rate; /* eDP ILR */
+ } ilr;
+};
+
+/*
+ * USB4 DPIA BW ALLOCATION STRUCTS
+ */
+struct dc_dpia_bw_alloc {
+ int sink_verified_bw; // The verified BW that the sink can allocate and use
+ int sink_allocated_bw; // The actual BW that the sink has currently allocated
+ int sink_max_bw; // The max BW that the sink can require/support
+ int estimated_bw; // The estimated available BW for this DPIA
+ int bw_granularity; // BW Granularity
+ bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
+ bool response_ready; // Response ready from the CM side
+};
+
+#define MAX_SINKS_PER_LINK 4
+
+enum dc_hpd_enable_select {
+ HPD_EN_FOR_ALL_EDP = 0,
+ HPD_EN_FOR_PRIMARY_EDP_ONLY,
+ HPD_EN_FOR_SECONDARY_EDP_ONLY,
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 140297c8ff55..739298d2dff3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -832,13 +832,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
LOG_FLAG_I2cAux_DceAux,
"dce_aux_transfer_with_retries: payload->defer_delay=%u",
payload->defer_delay);
- if (payload->defer_delay > 1) {
- msleep(payload->defer_delay);
- defer_time_in_ms += payload->defer_delay;
- } else if (payload->defer_delay <= 1) {
- udelay(payload->defer_delay * 1000);
- defer_time_in_ms += payload->defer_delay;
- }
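+ /* fsleep() picks the appropriate delay primitive (udelay, usleep_range
+ * or msleep) for the requested duration, replacing the manual
+ * msleep/udelay branching above. */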
+ fsleep(payload->defer_delay * 1000);
+ defer_time_in_ms += payload->defer_delay;
}
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 165392380842..67e3df7e1b05 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -930,7 +930,13 @@ static bool dce112_program_pix_clk(
REG_WRITE(MODULO[inst], dp_dto_ref_100hz);
/* Enable DTO */
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
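+ /* On ASICs that define PIPE0_DTO_SRC_SEL, also select the DP DTO as the
+ * pipe pixel-rate source while enabling the DTO. */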
+ if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
+ REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1,
+ PIPE0_DTO_SRC_SEL, 1);
+ else
+ REG_UPDATE(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1);
return true;
}
/* First disable SS
@@ -995,7 +1001,6 @@ static bool dcn31_program_pix_clk(
REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100);
REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Enable DTO */
if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
if (encoding == DP_128b_132b_ENCODING)
@@ -1009,9 +1014,6 @@ static bool dcn31_program_pix_clk(
else
REG_UPDATE(PIXEL_RATE_CNTL[inst],
DP_DTO0_ENABLE, 1);
-#else
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
-#endif
} else {
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
@@ -1023,7 +1025,6 @@ static bool dcn31_program_pix_clk(
REG_WRITE(MODULO[inst], dp_dto_ref_100hz);
/* Enable DTO */
- #if defined(CONFIG_DRM_AMD_DC_DCN)
if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
DP_DTO0_ENABLE, 1,
@@ -1031,17 +1032,12 @@ static bool dcn31_program_pix_clk(
else
REG_UPDATE(PIXEL_RATE_CNTL[inst],
DP_DTO0_ENABLE, 1);
- #else
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
- #endif
return true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
REG_UPDATE(PIXEL_RATE_CNTL[inst],
PIPE0_DTO_SRC_SEL, 0);
-#endif
/*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/
bp_pc_params.controller_id = pix_clk_params->controller_id;
@@ -1274,7 +1270,14 @@ static bool dcn3_program_pix_clk(
REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100);
REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000);
}
- REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
+ /* Enable DTO */
+ if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
+ REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1,
+ PIPE0_DTO_SRC_SEL, 1);
+ else
+ REG_UPDATE(PIXEL_RATE_CNTL[inst],
+ DP_DTO0_ENABLE, 1);
} else
// For other signal types(HDMI_TYPE_A, DVI) Driver still to call VBIOS Command table
dce112_program_pix_clk(clock_source, pix_clk_params, encoding, pll_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index aaf33c79b09b..f600b7431e23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -204,23 +204,17 @@
type DP_DTO0_MODULO; \
type DP_DTO0_ENABLE;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#define CS_REG_FIELD_LIST_DCN32(type) \
type PIPE0_DTO_SRC_SEL;
-#endif
struct dce110_clk_src_shift {
CS_REG_FIELD_LIST(uint8_t)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
CS_REG_FIELD_LIST_DCN32(uint8_t)
-#endif
};
struct dce110_clk_src_mask{
CS_REG_FIELD_LIST(uint32_t)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
CS_REG_FIELD_LIST_DCN32(uint32_t)
-#endif
};
struct dce110_clk_src_regs {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index d3cc5ec46956..e74266cc0098 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -586,7 +586,7 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
if (state == PSR_STATE0)
break;
}
- udelay(500);
+ fsleep(500);
}
/* assert if max retry hit */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index d9fd4ec60588..670d5ab9d998 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -1009,7 +1009,7 @@ static void dce_transform_set_pixel_storage_depth(
color_depth = COLOR_DEPTH_101010;
pixel_depth = 0;
expan_mode = 1;
- BREAK_TO_DEBUGGER();
+ DC_LOG_DC("The pixel depth %d is not valid, set COLOR_DEPTH_101010 instead.", depth);
break;
}
@@ -1023,8 +1023,7 @@ static void dce_transform_set_pixel_storage_depth(
if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
/* we shouldn't use unsupported capabilities
 * unless it is required by w/a */
- DC_LOG_WARNING("%s: Capability not supported",
- __func__);
+ DC_LOG_DC("%s: Capability not supported", __func__);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index fb0dec4ed3a6..9fc48208c2e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -148,7 +148,7 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
int edp_num;
uint8_t panel_mask = 0;
- get_edp_links(dc->dc, edp_links, &edp_num);
+ dc_get_edp_links(dc->dc, edp_links, &edp_num);
for (i = 0; i < edp_num; i++) {
if (edp_links[i]->link_status.link_active)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 1e2d2cbe2c37..19440bdf6344 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -215,7 +215,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
break;
}
- udelay(500);
+ fsleep(500);
}
/* assert if max retry hit */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 74005b9d352a..289e42070ece 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -26,8 +26,9 @@
#ifndef _DMUB_PSR_H_
#define _DMUB_PSR_H_
-#include "os_types.h"
-#include "dc_link.h"
+#include "dc_types.h"
+struct dc_link;
+struct dmub_psr_funcs;
struct dmub_psr {
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 0d4d3d586166..9fe0ce91db00 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -741,7 +741,7 @@ void dce110_edp_wait_for_hpd_ready(
/* obtain HPD */
/* TODO what to do with this? */
- hpd = link_get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
+ hpd = ctx->dc->link_srv->get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
if (!hpd) {
BREAK_TO_DEBUGGER();
@@ -809,19 +809,19 @@ void dce110_edp_power_control(
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link_dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
unsigned long long time_since_edp_poweron_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link_dp_trace_get_edp_poweron_timestamp(link)), 1000000);
+ ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link)), 1000000);
DC_LOG_HW_RESUME_S3(
"%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",
__func__,
power_up,
current_ts,
- link_dp_trace_get_edp_poweroff_timestamp(link),
- link_dp_trace_get_edp_poweron_timestamp(link),
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link),
+ ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link),
time_since_edp_poweroff_ms,
time_since_edp_poweron_ms);
@@ -836,7 +836,7 @@ void dce110_edp_power_control(
link->panel_config.pps.extra_t12_ms;
/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
- if (link_dp_trace_get_edp_poweroff_timestamp(link) != 0) {
+ if (ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link) != 0) {
if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)
remaining_min_edp_poweroff_time_ms =
remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms;
@@ -896,13 +896,13 @@ void dce110_edp_power_control(
__func__, (power_up ? "On":"Off"),
bp_result);
- link_dp_trace_set_edp_power_timestamp(link, power_up);
+ ctx->dc->link_srv->dp_trace_set_edp_power_timestamp(link, power_up);
DC_LOG_HW_RESUME_S3(
"%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",
__func__,
- link_dp_trace_get_edp_poweroff_timestamp(link),
- link_dp_trace_get_edp_poweron_timestamp(link));
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link),
+ ctx->dc->link_srv->dp_trace_get_edp_poweron_timestamp(link));
if (bp_result != BP_RESULT_OK)
DC_LOG_ERROR(
@@ -930,14 +930,14 @@ void dce110_edp_wait_for_T12(
return;
if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
- link_dp_trace_get_edp_poweroff_timestamp(link) != 0) {
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link) != 0) {
unsigned int t12_duration = 500; // Default T12 as per spec
unsigned long long current_ts = dm_get_timestamp(ctx);
unsigned long long time_since_edp_poweroff_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link_dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
+ ctx->dc->link_srv->dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12
@@ -1018,7 +1018,7 @@ void dce110_edp_backlight_control(
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T7-ready.
*/
- link_edp_receiver_ready_T7(link);
+ ctx->dc->link_srv->edp_receiver_ready_T7(link);
else
DC_LOG_DC("edp_receiver_ready_T7 skipped\n");
}
@@ -1049,7 +1049,7 @@ void dce110_edp_backlight_control(
if (link->dpcd_sink_ext_caps.bits.oled ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
- link_backlight_enable_aux(link, enable);
+ ctx->dc->link_srv->edp_backlight_enable_aux(link, enable);
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) {
@@ -1061,7 +1061,7 @@ void dce110_edp_backlight_control(
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T9-ready.
*/
- link_edp_add_delay_for_T9(link);
+ ctx->dc->link_srv->edp_add_delay_for_T9(link);
else
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
@@ -1161,7 +1161,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc);
}
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.hpo_dp_stream_enc);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1172,7 +1172,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
link_hwss->reset_stream_encoder(pipe_ctx);
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dto_params.otg_inst = tg->inst;
dto_params.timing = &pipe_ctx->stream->timing;
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
@@ -1181,7 +1181,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
}
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO: This looks like a bug to me as we are disabling HPO IO when
* we are just disabling a single HPO stream. Shouldn't we disable HPO
* HW control only when HPOs for all streams are disabled?
@@ -1223,7 +1223,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(
pipe_ctx->stream_res.hpo_dp_stream_enc);
@@ -1245,7 +1245,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T9-ready.
*/
- link_edp_receiver_ready_T9(link);
+ link->dc->link_srv->edp_receiver_ready_T9(link);
}
}
}
@@ -1428,7 +1428,7 @@ static enum dc_status dce110_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -1532,7 +1532,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
* To do so, move calling function enable_stream_timing to only be done AFTER calling
* function core_link_enable_stream
*/
- if (!(hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)))
+ if (!(hws->wa.dp_hpo_and_otg_sequence && dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)))
/* */
/* Do not touch stream timing on seamless boot optimization. */
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
@@ -1564,17 +1564,17 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->inst);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
if (!stream->dpms_off)
- link_set_dpms_on(context, pipe_ctx);
+ dc->link_srv->set_dpms_on(context, pipe_ctx);
/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
* To do so, move calling function enable_stream_timing to only be done AFTER calling
* function core_link_enable_stream
*/
- if (hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (hws->wa.dp_hpo_and_otg_sequence && dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
}
@@ -1600,7 +1600,7 @@ static void power_down_encoders(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
- link_blank_dp_stream(dc->links[i], false);
+ dc->link_srv->blank_dp_stream(dc->links[i], false);
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
@@ -1739,7 +1739,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num);
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (hws->funcs.init_pipes)
hws->funcs.init_pipes(dc, context);
@@ -2083,7 +2083,7 @@ static void dce110_reset_hw_ctx_wrap(
* disabled already, no need to disable again.
*/
if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) {
- link_set_dpms_off(pipe_ctx_old);
+ dc->link_srv->set_dpms_off(pipe_ctx_old);
/* free acquired resources*/
if (pipe_ctx_old->stream_res.audio) {
@@ -3054,13 +3054,13 @@ void dce110_enable_dp_link_output(
pipes[i].clock_source->funcs->program_pix_clk(
pipes[i].clock_source,
&pipes[i].stream_res.pix_clk_params,
- link_dp_get_encoding_format(link_settings),
+ dc->link_srv->dp_get_encoding_format(link_settings),
&pipes[i].pll_settings);
}
}
}
- if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}
@@ -3077,7 +3077,7 @@ void dce110_enable_dp_link_output(
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
void dce110_disable_link_output(struct dc_link *link,
@@ -3102,7 +3102,7 @@ void dce110_disable_link_output(struct dc_link *link,
link->dc->hwss.edp_power_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
static const struct hw_sequencer_funcs dce110_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 394d83a97f33..08028a1779ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -71,8 +71,6 @@ void dce110_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
-
void dce110_edp_power_control(
struct dc_link *link,
bool power_up);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index f808315b2835..a4a45a6ce61e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -401,8 +401,6 @@ static const struct resource_caps stoney_resource_cap = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB,
- .blends_with_below = true,
- .blends_with_above = true,
.per_pixel_alpha = 1,
.pixel_format_support = {
@@ -428,7 +426,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_plane_cap underlay_plane_cap = {
.type = DC_PLANE_TYPE_DCE_UNDERLAY,
- .blends_with_above = true,
.per_pixel_alpha = 1,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 71b3a6949001..c9e045666dcc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -59,6 +59,7 @@
SRI(LB_DATA_FORMAT, DSCL, id), \
SRI(LB_MEMORY_CTRL, DSCL, id), \
SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(DSCL_CONTROL, DSCL, id), \
SRI(SCL_BLACK_OFFSET, DSCL, id), \
SRI(SCL_TAP_CONTROL, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
@@ -209,6 +210,7 @@
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
+ TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\
TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
@@ -495,6 +497,7 @@
type AUTOCAL_MODE; \
type AUTOCAL_NUM_PIPE; \
type AUTOCAL_PIPE_ID; \
+ type SCL_BOUNDARY_MODE; \
type SCL_BLACK_OFFSET_RGB_Y; \
type SCL_BLACK_OFFSET_CBCR; \
type SCL_V_NUM_TAPS; \
@@ -1108,6 +1111,7 @@ struct dcn_dpp_mask {
uint32_t LB_DATA_FORMAT; \
uint32_t LB_MEMORY_CTRL; \
uint32_t DSCL_AUTOCAL; \
+ uint32_t DSCL_CONTROL; \
uint32_t SCL_BLACK_OFFSET; \
uint32_t SCL_TAP_CONTROL; \
uint32_t SCL_COEF_RAM_TAP_SELECT; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index f62368da875d..b33955928bd0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -655,6 +655,10 @@ void dpp1_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
AUTOCAL_NUM_PIPE, 0,
AUTOCAL_PIPE_ID, 0);
+ /* Clear the scaler boundary mode when autocal is off */
+ REG_SET(DSCL_CONTROL, 0,
+ SCL_BOUNDARY_MODE, 0);
+
/* Recout */
dpp1_dscl_set_recout(dpp, &scl_data->recout);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
index b6391a5ead78..365a3215f6d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
@@ -23,8 +23,6 @@
*
*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
#include "reg_helper.h"
#include "resource.h"
#include "dwb.h"
@@ -129,6 +127,3 @@ void dcn10_dwbc_construct(struct dcn10_dwbc *dwbc10,
dwbc10->dwbc_shift = dwbc_shift;
dwbc10->dwbc_mask = dwbc_mask;
}
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
index d56ea7c8171e..5268c46ae907 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h
@@ -24,8 +24,6 @@
#ifndef __DC_DWBC_DCN10_H__
#define __DC_DWBC_DCN10_H__
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
/* DCN */
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -267,5 +265,3 @@ void dcn10_dwbc_construct(struct dcn10_dwbc *dwbc10,
int inst);
#endif
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index a1a29c508394..7f9cceb49f4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -919,7 +919,7 @@ enum dc_status dcn10_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -1017,7 +1017,7 @@ static void dcn10_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- link_set_dpms_off(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -1564,7 +1564,7 @@ void dcn10_init_hw(struct dc *dc)
}
/* we want to turn off all dp displays before doing detection */
- link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -1638,7 +1638,7 @@ void dcn10_power_down_on_boot(struct dc *dc)
int edp_num;
int i = 0;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num)
edp_link = edp_links[0];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index c4287147b853..ee08b545aaea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1219,7 +1219,6 @@ void dcn10_link_encoder_update_mst_stream_allocation_table(
const struct link_mst_stream_allocation_table *table)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t value0 = 0;
uint32_t value1 = 0;
uint32_t value2 = 0;
uint32_t slots = 0;
@@ -1321,7 +1320,7 @@ void dcn10_link_encoder_update_mst_stream_allocation_table(
do {
udelay(10);
- value0 = REG_READ(DP_MSE_SAT_UPDATE);
+ REG_READ(DP_MSE_SAT_UPDATE);
REG_GET(DP_MSE_SAT_UPDATE,
DP_MSE_SAT_UPDATE, &value1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 6bfac8088ab0..2bb8e11f26e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -504,8 +504,6 @@ static const struct resource_caps rv2_res_cap = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 3c451ab5d3ca..f496e952ceec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -933,7 +933,7 @@ void enc1_stream_encoder_dp_blank(
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
/* the encoder stops sending the video stream
* at the start of the vertical blanking.
@@ -952,7 +952,7 @@ void enc1_stream_encoder_dp_blank(
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
}
/* output video stream to link encoder */
@@ -1025,7 +1025,8 @@ void enc1_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
void enc1_stream_encoder_set_avmute(
@@ -1470,10 +1471,9 @@ void enc1_se_hdmi_audio_setup(
void enc1_se_hdmi_audio_disable(
struct stream_encoder *enc)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (enc->afmt && enc->afmt->funcs->afmt_powerdown)
enc->afmt->funcs->afmt_powerdown(enc->afmt);
-#endif
+
enc1_se_enable_audio_clock(enc, false);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 42344aec60d6..5bd698cd6d20 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -50,7 +50,7 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
static void dsc2_disable(struct display_stream_compressor *dsc);
static void dsc2_disconnect(struct display_stream_compressor *dsc);
-const struct dsc_funcs dcn20_dsc_funcs = {
+static const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
.dsc_validate_stream = dsc2_validate_stream,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
index f1490e97b6ce..f8667be57046 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c
@@ -301,7 +301,7 @@ void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params)
}
-const struct dwbc_funcs dcn20_dwbc_funcs = {
+static const struct dwbc_funcs dcn20_dwbc_funcs = {
.get_caps = dwb2_get_caps,
.enable = dwb2_enable,
.disable = dwb2_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index b83873a3a534..53669f832ba5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -190,10 +190,15 @@ void dcn20_enable_power_gating_plane(
bool enable)
{
bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
if (enable)
force_on = false;
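+ /* Momentarily raise IP_REQUEST_EN so the PG domain updates below take
+ * effect; the original value is restored at the end of this function. */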
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
@@ -224,6 +229,10 @@ void dcn20_enable_power_gating_plane(
REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
if (REG(DOMAIN21_PG_CONFIG))
REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+
}
void dcn20_dccg_init(struct dce_hwseq *hws)
@@ -711,7 +720,7 @@ enum dc_status dcn20_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -2396,7 +2405,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
pipe_ctx->stream_res.hpo_dp_stream_enc,
@@ -2449,7 +2458,7 @@ static void dcn20_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- link_set_dpms_off(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -2469,7 +2478,7 @@ static void dcn20_reset_back_end_for_pipe(
}
}
else if (pipe_ctx->stream_res.dsc) {
- link_set_dsc_enable(pipe_ctx, false);
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
}
/* by upper caller loop, parent pipe: pipe0, will be reset last.
@@ -2704,12 +2713,12 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
unsigned int k1_div = PIXEL_RATE_DIV_NA;
unsigned int k2_div = PIXEL_RATE_DIV_NA;
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
}
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
@@ -2743,7 +2752,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
index ccd91792991b..259a98e4ee2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
@@ -297,7 +297,7 @@ void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
dump_info->size = dest_height * (mcif_params->luma_pitch + mcif_params->chroma_pitch);
}
-const struct mcif_wb_funcs dcn20_mmhubbub_funcs = {
+static const struct mcif_wb_funcs dcn20_mmhubbub_funcs = {
.enable_mcif = mmhubbub2_enable_mcif,
.disable_mcif = mmhubbub2_disable_mcif,
.config_mcif_buf = mmhubbub2_config_mcif_buf,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 116f67a0b989..5da6e44f284a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -542,7 +542,7 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
return NULL;
}
-const struct mpc_funcs dcn20_mpc_funcs = {
+static const struct mpc_funcs dcn20_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 3af24ef9cb2d..77ef474ced07 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -670,8 +670,6 @@ static const struct resource_caps res_cap_nv10 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -1213,8 +1211,11 @@ static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
if (pool->base.pp_smu != NULL)
dcn20_pp_smu_destroy(&pool->base.pp_smu);
- if (pool->base.oem_device != NULL)
- link_destroy_ddc_service(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
struct hubp *dcn20_hubp_create(
@@ -2765,7 +2766,7 @@ static bool dcn20_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 42865d6c0cdd..0b47aeb60e79 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -546,7 +546,8 @@ void enc2_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
static void enc2_dp_set_odm_combine(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
index f50ab961bc17..a7268027a472 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
@@ -185,13 +185,6 @@ static bool dpp201_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
if (scl_data->viewport.width != scl_data->h_active &&
scl_data->viewport.height != scl_data->v_active &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
index 61bcfa03c4e7..1aeb04fbd89d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -541,8 +541,6 @@ void dcn201_pipe_control_lock(
bool lock)
{
struct dce_hwseq *hws = dc->hwseq;
- struct hubp *hubp = NULL;
- hubp = dc->res_pool->hubps[pipe->pipe_idx];
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
index 95c4c55f067c..1af03a86ec9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
@@ -76,7 +76,7 @@ static void mpc201_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
mpcc->shared_bottom = false;
}
-const struct mpc_funcs dcn201_mpc_funcs = {
+static const struct mpc_funcs dcn201_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
index 407d995bfa99..6ea70da28aaa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
@@ -74,7 +74,7 @@
#define MIN_DISP_CLK_KHZ 100000
#define MIN_DPP_CLK_KHZ 100000
-struct _vcs_dpi_ip_params_st dcn201_ip = {
+static struct _vcs_dpi_ip_params_st dcn201_ip = {
.gpuvm_enable = 0,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
@@ -136,7 +136,7 @@ struct _vcs_dpi_ip_params_st dcn201_ip = {
.number_of_cursors = 1,
};
-struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
.clock_limits = {
{
.state = 0,
@@ -571,8 +571,6 @@ static const struct resource_caps res_cap_dnc201 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 15475c7e2cf9..2a182c2f57d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -132,8 +132,8 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
return;
pipe_ctx->stream->dpms_off = false;
- link_set_dpms_on(context, pipe_ctx);
- link_set_dpms_off(pipe_ctx);
+ pipe_ctx->stream->ctx->dc->link_srv->set_dpms_on(context, pipe_ctx);
+ pipe_ctx->stream->ctx->dc->link_srv->set_dpms_off(pipe_ctx);
pipe_ctx->stream->dpms_off = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 8f9244fe5c86..19aaa557b2db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -609,8 +609,6 @@ static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -642,7 +640,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
index 95528e5ef89e..55e388c4c98b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
@@ -123,7 +123,6 @@ void afmt3_se_audio_setup(
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
- uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@@ -131,7 +130,6 @@ void afmt3_se_audio_setup(
if (audio_info == NULL)
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 5f9079d3943a..9d08127d209b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -28,6 +28,7 @@
#include "dcn30_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
+#include "dc.h"
#include "core_types.h"
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
index 6263408d71fc..2082372d69ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
@@ -102,6 +102,7 @@
SRI(LB_DATA_FORMAT, DSCL, id), \
SRI(LB_MEMORY_CTRL, DSCL, id), \
SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(DSCL_CONTROL, DSCL, id), \
SRI(SCL_TAP_CONTROL, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
@@ -237,6 +238,7 @@
TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
+ TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
index f14f69616692..0d98918bf0fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -220,7 +220,7 @@ void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params)
}
-const struct dwbc_funcs dcn30_dwbc_funcs = {
+static const struct dwbc_funcs dcn30_dwbc_funcs = {
.get_caps = dwb3_get_caps,
.enable = dwb3_enable,
.disable = dwb3_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index dc3e8df706b3..e46bbe7ddcc9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -47,13 +47,9 @@ void hubp3_set_vm_system_aperture_settings(struct hubp *hubp,
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
- // The format of default addr is 48:12 of the 48 bit addr
- mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
-
// The format of high/low are 48:18 of the 48 bit addr
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 3b4d4d68359b..586de81fc2da 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -323,13 +323,10 @@ void dcn30_enable_writeback(
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
- struct timing_generator *optc;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
- /* set the OPTC source mux */
- optc = dc->res_pool->timing_generators[dwb->otg_inst];
DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
__func__, wb_info->dwb_pipe_inst,\
wb_info->mpcc_inst);
@@ -534,13 +531,8 @@ void dcn30_init_hw(struct dc *dc)
}
}
- /* Power gate DSCs */
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
-
/* we want to turn off all dp displays before doing detection */
- link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -567,7 +559,7 @@ void dcn30_init_hw(struct dc *dc)
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link = NULL;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num)
edp_link = edp_links[0];
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
index 7a93eff183d9..6f2a0d5d963b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
@@ -211,7 +211,7 @@ static void mmhubbub3_config_mcif_arb(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, params->arbitration_slice);
}
-const struct mcif_wb_funcs dcn30_mmhubbub_funcs = {
+static const struct mcif_wb_funcs dcn30_mmhubbub_funcs = {
.warmup_mcif = mmhubbub3_warmup_mcif,
.enable_mcif = mmhubbub2_enable_mcif,
.disable_mcif = mmhubbub2_disable_mcif,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index ad1c1b703874..6cf40c1332bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1399,7 +1399,7 @@ static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
}
}
-const struct mpc_funcs dcn30_mpc_funcs = {
+static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index b5b5320c7bef..c9e45da6ccd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -680,8 +680,6 @@ static const struct resource_caps res_cap_dcn3 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -1207,8 +1205,11 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
- if (pool->base.oem_device != NULL)
- link_destroy_ddc_service(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
static struct hubp *dcn30_hubp_create(
@@ -2592,7 +2593,7 @@ static bool dcn30_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index ee62ae3eb98f..b93b4498dba4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -651,8 +651,6 @@ static struct resource_caps res_cap_dcn301 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 03ddf4f5f065..9f93c43115ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -147,8 +147,6 @@ static const struct resource_caps res_cap_dcn302 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
@@ -1127,8 +1125,11 @@ static void dcn302_resource_destruct(struct resource_pool *pool)
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
- if (pool->oem_device != NULL)
- link_destroy_ddc_service(&pool->oem_device);
+ if (pool->oem_device != NULL) {
+ struct dc *dc = pool->oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->oem_device);
+ }
}
static void dcn302_destroy_resource_pool(struct resource_pool **pool)
@@ -1508,7 +1509,7 @@ static bool dcn302_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 31e212064168..7f72ef882ca4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -126,8 +126,6 @@ static const struct resource_caps res_cap_dcn303 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
.argb8888 = true,
@@ -1053,8 +1051,11 @@ static void dcn303_resource_destruct(struct resource_pool *pool)
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
- if (pool->oem_device != NULL)
- link_destroy_ddc_service(&pool->oem_device);
+ if (pool->oem_device != NULL) {
+ struct dc *dc = pool->oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->oem_device);
+ }
}
static void dcn303_destroy_resource_pool(struct resource_pool **pool)
@@ -1163,7 +1164,6 @@ static bool dcn303_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dc->caps.mall_size_per_mem_channel = 4;
/* total size = mall per channel * num channels * 1024 * 1024 */
dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel *
@@ -1171,7 +1171,6 @@ static bool dcn303_resource_construct(
1024 * 1024;
dc->caps.cursor_cache_size =
dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
-#endif
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
@@ -1421,7 +1420,7 @@ static bool dcn303_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
index 24e9ff65434d..05aac3e444b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
@@ -72,40 +72,6 @@ static void apg31_disable(
REG_UPDATE(APG_CONTROL2, APG_ENABLE, 0);
}
-static union audio_cea_channels speakers_to_channels(
- struct audio_speaker_flags speaker_flags)
-{
- union audio_cea_channels cea_channels = {0};
-
- /* these are one to one */
- cea_channels.channels.FL = speaker_flags.FL_FR;
- cea_channels.channels.FR = speaker_flags.FL_FR;
- cea_channels.channels.LFE = speaker_flags.LFE;
- cea_channels.channels.FC = speaker_flags.FC;
-
- /* if Rear Left and Right exist move RC speaker to channel 7
- * otherwise to channel 5
- */
- if (speaker_flags.RL_RR) {
- cea_channels.channels.RL_RC = speaker_flags.RL_RR;
- cea_channels.channels.RR = speaker_flags.RL_RR;
- cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
- } else {
- cea_channels.channels.RL_RC = speaker_flags.RC;
- }
-
- /* FRONT Left Right Center and REAR Left Right Center are exclusive */
- if (speaker_flags.FLC_FRC) {
- cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
- cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
- } else {
- cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
- cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
- }
-
- return cea_channels;
-}
-
static void apg31_se_audio_setup(
struct apg *apg,
unsigned int az_inst,
@@ -113,24 +79,17 @@ static void apg31_se_audio_setup(
{
struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
- uint32_t speakers = 0;
- uint32_t channels = 0;
-
ASSERT(audio_info);
/* This should not happen; if it does, bail out so we don't get a BSOD */
if (audio_info == NULL)
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
- channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
-
/* DisplayPort only allows for one audio stream with stream ID 0 */
REG_UPDATE(APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, 0);
/* When running in "pair mode", pairs of audio channels have their own enable;
 * this is for really old audio drivers */
REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xFF);
- // REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, channels);
/* Disable forced mem power off */
REG_UPDATE(APG_MEM_PWR, APG_MEM_PWR_FORCE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 275e78c06dee..745a5d187a98 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -37,6 +37,7 @@
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
#include "dal_asic_id.h"
+#include "link.h"
#define CTX \
enc10->base.ctx
@@ -485,7 +486,7 @@ void dcn31_link_encoder_enable_dp_output(
if (link) {
dpia_control.dpia_id = link->ddc_hw_inst;
- dpia_control.fec_rdy = dc_link_should_enable_fec(link);
+ dpia_control.fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
} else {
DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
BREAK_TO_DEBUGGER();
@@ -532,7 +533,7 @@ void dcn31_link_encoder_enable_dp_mst_output(
if (link) {
dpia_control.dpia_id = link->ddc_hw_inst;
- dpia_control.fec_rdy = dc_link_should_enable_fec(link);
+ dpia_control.fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
} else {
DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 0b317ed31f91..5b7ad38f85e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -26,7 +26,6 @@
#include "dc_bios_types.h"
#include "dcn31_hpo_dp_link_encoder.h"
#include "reg_helper.h"
-#include "dc_link.h"
#include "stream_encoder.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index d76f55a12eb4..0278bae50a9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -26,7 +26,7 @@
#include "dc_bios_types.h"
#include "dcn31_hpo_dp_stream_encoder.h"
#include "reg_helper.h"
-#include "dc_link.h"
+#include "dc.h"
#define DC_LOGGER \
enc3->base.ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index d13e46eeee3c..10e3cc17f71a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -97,7 +97,7 @@ static void enable_memory_low_power(struct dc *dc)
// Power down VPGs
for (i = 0; i < dc->res_pool->stream_enc_count; i++)
dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
#endif
@@ -202,7 +202,7 @@ void dcn31_init_hw(struct dc *dc)
dmub_enable_outbox_notification(dc->ctx->dmub_srv);
/* we want to turn off all dp displays before doing detection */
- link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -230,7 +230,7 @@ void dcn31_init_hw(struct dc *dc)
}
if (num_opps > 1) {
- link_blank_all_edp_displays(dc);
+ dc->link_srv->blank_all_edp_displays(dc);
break;
}
}
@@ -291,7 +291,7 @@ void dcn31_init_hw(struct dc *dc)
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
#endif
@@ -414,7 +414,7 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ else if (pipe_ctx->stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.hpo_dp_stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -565,7 +565,7 @@ static void dcn31_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- link_set_dpms_off(pipe_ctx);
+ dc->link_srv->set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -584,7 +584,7 @@ static void dcn31_reset_back_end_for_pipe(
}
}
} else if (pipe_ctx->stream_res.dsc) {
- link_set_dsc_enable(pipe_ctx, false);
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
}
pipe_ctx->stream = NULL;
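The dcn31_hwseq.c hunks above are part of a tree-wide refactor: direct calls into link_* helpers are replaced by calls through the dc->link_srv function table, so hwseq code depends only on the link service interface rather than on link internals. A minimal sketch of the indirection, with a hypothetical two-entry table rather than the real struct link_service layout:

#include <stdbool.h>

struct pipe_ctx; /* opaque here */

/* hypothetical, reduced service table; the real one carries many more hooks */
struct link_service {
	void (*set_dpms_off)(struct pipe_ctx *pipe_ctx);
	bool (*dp_is_128b_132b_signal)(struct pipe_ctx *pipe_ctx);
};

struct dc {
	const struct link_service *link_srv; /* installed once at dc creation */
};

static void reset_back_end(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	/* callers no longer reference link_set_dpms_off() by symbol */
	dc->link_srv->set_dpms_off(pipe_ctx);
}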
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index d3918a10773a..eaaa2e01f6d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -827,8 +827,6 @@ static const struct resource_caps res_cap_dcn31 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 962a2c02b422..467509a65fa7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -296,12 +296,14 @@ static void enc314_stream_encoder_dp_unblank(
uint32_t n_vid = 0x8000;
uint32_t m_vid;
uint32_t n_multiply = 0;
+ uint32_t pix_per_cycle = 0;
uint64_t m_vid_l = n_vid;
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1) {
/*this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
+ pix_per_cycle = 1;
}
/* M / N = Fstream / Flink
* m_vid / n_vid = pixel rate / link rate
@@ -329,6 +331,10 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE_2(DP_VID_TIMING,
DP_VID_M_N_GEN_EN, 1,
DP_VID_N_MUL, n_multiply);
+
+ REG_UPDATE(DP_PIXEL_FORMAT,
+ DP_PIXEL_PER_CYCLE_PROCESSING_MODE,
+ pix_per_cycle);
}
/* make sure stream is disabled before resetting steer fifo */
@@ -366,7 +372,7 @@ static void enc314_stream_encoder_dp_unblank(
*/
enc314_enable_fifo(enc);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
/* Set DSC-related configuration.
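enc314_stream_encoder_dp_unblank() above derives the DP M/N timing from m_vid / n_vid = pixel rate / link rate with N fixed at 0x8000, and the new hunk now also mirrors the two-pixels-per-container case into DP_PIXEL_PER_CYCLE_PROCESSING_MODE. A self-contained sketch of the ratio arithmetic, under the assumption that the hardware doubles the effective rate when the multiply flag is set:

#include <stdint.h>

/* compute DP VID_M/VID_N from stream and link clocks (kHz) */
static void compute_dp_m_n(uint64_t pixel_clk_khz, uint64_t link_clk_khz,
			   int two_pix_per_container,
			   uint32_t *m_vid, uint32_t *n_vid, uint32_t *n_multiply)
{
	const uint64_t n = 0x8000; /* fixed N, as in the encoder code */

	*n_vid = (uint32_t)n;
	*m_vid = (uint32_t)(n * pixel_clk_khz / link_clk_khz); /* M/N = Fstream/Flink */
	/* with two pixels per container the hw doubles the computed rate */
	*n_multiply = two_pix_per_container ? 1 : 0;
}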
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 575d3501c848..bcc03426fc3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -346,7 +346,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 54ed3de869d3..50ed7e09d5ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -855,8 +855,6 @@ static const struct resource_caps res_cap_dcn314 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -887,6 +885,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.enable_z9_disable_interface = true,
+ .minimum_z8_residency_time = 3080,
.psr_skip_crtc_disable = true,
.disable_dmcu = true,
.force_abm_enable = false,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 7887078c5f64..41c972c8eb19 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -824,8 +824,6 @@ static const struct resource_caps res_cap_dcn31 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index dc0b49506275..9ead347a33e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -824,8 +824,6 @@ static const struct resource_caps res_cap_dcn31 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
index 1c46fad0977b..271c163e4844 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
@@ -31,42 +31,6 @@
#define DCCG_SFII(block, reg_name, field_prefix, field_name, inst, post_fix)\
.field_prefix ## _ ## field_name[inst] = block ## inst ## _ ## reg_name ## __ ## field_prefix ## inst ## _ ## field_name ## post_fix
-
-#define DCCG_REG_LIST_DCN32() \
- SR(DPPCLK_DTO_CTRL),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 0),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
- DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
- DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\
- SR(PHYASYMCLK_CLOCK_CNTL),\
- SR(PHYBSYMCLK_CLOCK_CNTL),\
- SR(PHYCSYMCLK_CLOCK_CNTL),\
- SR(PHYDSYMCLK_CLOCK_CNTL),\
- SR(PHYESYMCLK_CLOCK_CNTL),\
- SR(DPSTREAMCLK_CNTL),\
- SR(HDMISTREAMCLK_CNTL),\
- SR(SYMCLK32_SE_CNTL),\
- SR(SYMCLK32_LE_CNTL),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
- DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 0),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 1),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 2),\
- DCCG_SRII(MODULO, DTBCLK_DTO, 3),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 0),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 1),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 2),\
- DCCG_SRII(PHASE, DTBCLK_DTO, 3),\
- SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
- SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
- SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL),\
- SR(DCCG_AUDIO_DTO_SOURCE)
-
-
#define DCCG_MASK_SH_LIST_DCN32(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 36e6f5657942..c72448125976 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -211,10 +211,8 @@ static void enc32_stream_encoder_hdmi_set_stream_attribute(
HDMI_GC_SEND, 1,
HDMI_NULL_SEND, 1);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
/* Disable Audio Content Protection packet transmission */
REG_UPDATE(HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, 0);
-#endif
/* following belongs to audio */
/* Enable Audio InfoFrame packet transmission. */
@@ -373,7 +371,7 @@ static void enc32_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
/* Set DSC-related configuration.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index ecd041a446d2..875b1cd46056 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -31,70 +31,6 @@
#include "stream_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
-#define SE_DCN32_REG_LIST(id)\
- SRI(AFMT_CNTL, DIG, id), \
- SRI(DIG_FE_CNTL, DIG, id), \
- SRI(HDMI_CONTROL, DIG, id), \
- SRI(HDMI_DB_CONTROL, DIG, id), \
- SRI(HDMI_GC, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \
- SRI(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \
- SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
- SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
- SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
- SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
- SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
- SRI(HDMI_ACR_32_0, DIG, id),\
- SRI(HDMI_ACR_32_1, DIG, id),\
- SRI(HDMI_ACR_44_0, DIG, id),\
- SRI(HDMI_ACR_44_1, DIG, id),\
- SRI(HDMI_ACR_48_0, DIG, id),\
- SRI(HDMI_ACR_48_1, DIG, id),\
- SRI(DP_DB_CNTL, DP, id), \
- SRI(DP_MSA_MISC, DP, id), \
- SRI(DP_MSA_VBID_MISC, DP, id), \
- SRI(DP_MSA_COLORIMETRY, DP, id), \
- SRI(DP_MSA_TIMING_PARAM1, DP, id), \
- SRI(DP_MSA_TIMING_PARAM2, DP, id), \
- SRI(DP_MSA_TIMING_PARAM3, DP, id), \
- SRI(DP_MSA_TIMING_PARAM4, DP, id), \
- SRI(DP_MSE_RATE_CNTL, DP, id), \
- SRI(DP_MSE_RATE_UPDATE, DP, id), \
- SRI(DP_PIXEL_FORMAT, DP, id), \
- SRI(DP_SEC_CNTL, DP, id), \
- SRI(DP_SEC_CNTL1, DP, id), \
- SRI(DP_SEC_CNTL2, DP, id), \
- SRI(DP_SEC_CNTL5, DP, id), \
- SRI(DP_SEC_CNTL6, DP, id), \
- SRI(DP_STEER_FIFO, DP, id), \
- SRI(DP_VID_M, DP, id), \
- SRI(DP_VID_N, DP, id), \
- SRI(DP_VID_STREAM_CNTL, DP, id), \
- SRI(DP_VID_TIMING, DP, id), \
- SRI(DP_SEC_AUD_N, DP, id), \
- SRI(DP_SEC_TIMESTAMP, DP, id), \
- SRI(DP_DSC_CNTL, DP, id), \
- SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
- SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
- SRI(DP_SEC_FRAMING4, DP, id), \
- SRI(DP_GSP11_CNTL, DP, id), \
- SRI(DME_CONTROL, DME, id),\
- SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
- SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
- SRI(DIG_FE_CNTL, DIG, id), \
- SRI(DIG_CLOCK_PATTERN, DIG, id), \
- SRI(DIG_FIFO_CTRL0, DIG, id)
-
-
#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
index 4dbad8d4b4fc..8af01f579690 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.c
@@ -26,7 +26,6 @@
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32_hpo_dp_link_encoder.h"
#include "reg_helper.h"
-#include "dc_link.h"
#include "stream_encoder.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index b20eb04724bb..ad33427192c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -28,68 +28,6 @@
#include "dcn21/dcn21_hubbub.h"
-#define HUBBUB_REG_LIST_DCN32(id)\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
- SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
- SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
- SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
- SR(DCHUBBUB_ARB_SAT_LEVEL),\
- SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
- SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
- SR(DCHUBBUB_SOFT_RESET),\
- SR(DCHUBBUB_CRC_CTRL), \
- SR(DCN_VM_FB_LOCATION_BASE),\
- SR(DCN_VM_FB_LOCATION_TOP),\
- SR(DCN_VM_FB_OFFSET),\
- SR(DCN_VM_AGP_BOT),\
- SR(DCN_VM_AGP_TOP),\
- SR(DCN_VM_AGP_BASE),\
- HUBBUB_SR_WATERMARK_REG_LIST(), \
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
- SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
- SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\
- SR(DCHUBBUB_DET0_CTRL),\
- SR(DCHUBBUB_DET1_CTRL),\
- SR(DCHUBBUB_DET2_CTRL),\
- SR(DCHUBBUB_DET3_CTRL),\
- SR(DCHUBBUB_COMPBUF_CTRL),\
- SR(COMPBUF_RESERVED_SPACE),\
- SR(DCHUBBUB_DEBUG_CTRL_0),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C),\
- SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C),\
- SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C),\
- SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D),\
- SR(DCN_VM_FAULT_ADDR_MSB),\
- SR(DCN_VM_FAULT_ADDR_LSB),\
- SR(DCN_VM_FAULT_CNTL),\
- SR(DCN_VM_FAULT_STATUS),\
- SR(SDPIF_REQUEST_RATE_LIMIT),\
- SR(DCHUBBUB_CLOCK_CNTL),\
- SR(DCHUBBUB_SDPIF_CFG0),\
- SR(DCHUBBUB_SDPIF_CFG1),\
- SR(DCHUBBUB_MEM_PWR_MODE_CTRL)
-
-
#define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
index 4cdbf63c952b..d5e5ed8ab869 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
@@ -31,12 +31,6 @@
#include "dcn30/dcn30_hubp.h"
#include "dcn31/dcn31_hubp.h"
-#define HUBP_REG_LIST_DCN32(id)\
- HUBP_REG_LIST_DCN30(id),\
- SRI(DCHUBP_MALL_CONFIG, HUBP, id),\
- SRI(DCHUBP_VMPG_CONFIG, HUBP, id),\
- SRI(UCLK_PSTATE_FORCE, HUBPREQ, id)
-
#define HUBP_MASK_SH_LIST_DCN32(mask_sh)\
HUBP_MASK_SH_LIST_DCN31(mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MALL_CONFIG, USE_MALL_SEL, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 16f892125b6f..f9073b722b36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -131,10 +131,15 @@ void dcn32_enable_power_gating_plane(
bool enable)
{
bool force_on = true; /* disable power gating */
+ uint32_t org_ip_request_cntl = 0;
if (enable)
force_on = false;
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+
/* DCHUBP0/1/2/3 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
@@ -146,6 +151,9 @@ void dcn32_enable_power_gating_plane(
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
void dcn32_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
@@ -786,13 +794,14 @@ void dcn32_init_hw(struct dc *dc)
}
}
- /* Power gate DSCs */
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ /* call enable_power_gating_plane before dsc_pg_control because
+ * FORCEON = 1 is the hw default value on bootup and on resume from s3
+ */
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* we want to turn off all dp displays before doing detection */
- link_blank_all_dp_displays(dc);
+ dc->link_srv->blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -828,7 +837,7 @@ void dcn32_init_hw(struct dc *dc)
struct dc_link *edp_links[MAX_NUM_EDP];
struct dc_link *edp_link;
- get_edp_links(dc, edp_links, &edp_num);
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num) {
for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i];
@@ -886,8 +895,6 @@ void dcn32_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -1095,7 +1102,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) {
@@ -1104,7 +1111,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
*k2_div = PIXEL_RATE_DIV_BY_2;
else
*k2_div = PIXEL_RATE_DIV_BY_4;
- } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
+ } else if (dc_is_dp_signal(stream->signal)) {
if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
@@ -1159,7 +1166,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
pipe_ctx->stream_res.hpo_dp_stream_enc,
@@ -1186,7 +1193,7 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
return false;
- if (dc_is_dp_signal(pipe_ctx->stream->signal) && !link_is_dp_128b_132b_signal(pipe_ctx) &&
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy)
return true;
return false;
@@ -1220,7 +1227,8 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link)
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings);
link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
break;
@@ -1252,7 +1260,7 @@ void dcn32_disable_link_output(struct dc_link *link,
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
apply_symclk_on_tx_off_wa(link);
}
@@ -1406,3 +1414,86 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
}
}
}
+
+/* Blank pixel data during initialization */
+void dcn32_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ struct output_pixel_processor *opp = NULL;
+ struct output_pixel_processor *bottom_opp = NULL;
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+ uint32_t otg_active_width, otg_active_height;
+ uint32_t i;
+
+ /* program opp dpg blank color */
+ color_space = COLOR_SPACE_SRGB;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ /* get the OTG active size */
+ tg->funcs->get_otg_active_size(tg,
+ &otg_active_width,
+ &otg_active_height);
+
+ /* get the OPTC source */
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+
+ if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
+
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+ opp = dc->res_pool->opps[i];
+ break;
+ }
+ }
+
+ if (num_opps == 2) {
+ otg_active_width = otg_active_width / 2;
+
+ if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src1) {
+ bottom_opp = dc->res_pool->opps[i];
+ break;
+ }
+ }
+ }
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator)
+ opp->funcs->opp_set_disp_pattern_generator(
+ opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+
+ if (num_opps == 2) {
+ if (bottom_opp && bottom_opp->funcs->opp_set_disp_pattern_generator) {
+ bottom_opp->funcs->opp_set_disp_pattern_generator(
+ bottom_opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+ hws->funcs.wait_for_blank_complete(bottom_opp);
+ }
+ }
+
+ if (opp)
+ hws->funcs.wait_for_blank_complete(opp);
+}
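dcn32_enable_power_gating_plane() above now brackets the DOMAIN*_PG_CONFIG updates with a save/enable/restore of DC_IP_REQUEST_CNTL.IP_REQUEST_EN, since those power-gate registers only accept writes while IP requests are enabled. Reduced to a sketch with hypothetical MMIO accessors standing in for REG_GET/REG_SET/REG_UPDATE:

#include <stdint.h>

uint32_t mmio_read(uint32_t addr);            /* hypothetical accessors */
void mmio_write(uint32_t addr, uint32_t val);

#define DC_IP_REQUEST_CNTL 0x1000             /* placeholder offset */

static void with_ip_request_enabled(void (*program_pg)(void))
{
	uint32_t orig = mmio_read(DC_IP_REQUEST_CNTL) & 1;

	if (!orig)
		mmio_write(DC_IP_REQUEST_CNTL, 1); /* unlock PG register writes */
	program_pg();                              /* DOMAIN*_PG_CONFIG updates */
	if (!orig)
		mmio_write(DC_IP_REQUEST_CNTL, 0); /* restore original state */
}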
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index e9e9534f3668..84c1f36c3fa6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -104,4 +104,8 @@ void dcn32_update_dsc_pg(struct dc *dc,
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
+void dcn32_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index 0694fa3a3680..dcb81662884f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -132,7 +132,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
.did_underflow_occur = dcn10_did_underflow_occur,
- .init_blank = dcn20_init_blank,
+ .init_blank = dcn32_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 206a5ddbaf6d..c8041cfd594d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -42,7 +42,7 @@
mpc30->mpc_shift->field_name, mpc30->mpc_mask->field_name
-static void mpc32_mpc_init(struct mpc *mpc)
+void mpc32_mpc_init(struct mpc *mpc)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
@@ -254,7 +254,7 @@ static void mpc32_program_post1dlut_pwl(
}
}
-static bool mpc32_program_post1dlut(
+bool mpc32_program_post1dlut(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -701,7 +701,7 @@ static void mpc32_power_on_shaper_3dlut(
}
-static bool mpc32_program_shaper(
+bool mpc32_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -897,7 +897,7 @@ static void mpc32_set_3dlut_mode(
}
-static bool mpc32_program_3dlut(
+bool mpc32_program_3dlut(
struct mpc *mpc,
const struct tetrahedral_params *params,
int mpcc_id)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
index 61f33c0d8e59..2c2ecd053806 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
@@ -310,6 +310,19 @@ struct dcn32_mpc_registers {
MPC_REG_VARIABLE_LIST_DCN3_0;
MPC_REG_VARIABLE_LIST_DCN32;
};
+void mpc32_mpc_init(struct mpc *mpc);
+bool mpc32_program_3dlut(
+ struct mpc *mpc,
+ const struct tetrahedral_params *params,
+ int mpcc_id);
+bool mpc32_program_post1dlut(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id);
+bool mpc32_program_shaper(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id);
void dcn32_mpc_construct(struct dcn30_mpc *mpc30,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
index 5e57c39235fa..b92ba8c75694 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
@@ -28,77 +28,6 @@
#include "dcn10/dcn10_optc.h"
-#define OPTC_COMMON_REG_LIST_DCN3_2(inst) \
- SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
- SRI(OTG_VUPDATE_PARAM, OTG, inst),\
- SRI(OTG_VREADY_PARAM, OTG, inst),\
- SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\
- SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\
- SRI(OTG_H_TOTAL, OTG, inst),\
- SRI(OTG_H_BLANK_START_END, OTG, inst),\
- SRI(OTG_H_SYNC_A, OTG, inst),\
- SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\
- SRI(OTG_H_TIMING_CNTL, OTG, inst),\
- SRI(OTG_V_TOTAL, OTG, inst),\
- SRI(OTG_V_BLANK_START_END, OTG, inst),\
- SRI(OTG_V_SYNC_A, OTG, inst),\
- SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\
- SRI(OTG_CONTROL, OTG, inst),\
- SRI(OTG_STEREO_CONTROL, OTG, inst),\
- SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\
- SRI(OTG_STEREO_STATUS, OTG, inst),\
- SRI(OTG_V_TOTAL_MAX, OTG, inst),\
- SRI(OTG_V_TOTAL_MIN, OTG, inst),\
- SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\
- SRI(OTG_TRIGA_CNTL, OTG, inst),\
- SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
- SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
- SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\
- SRI(OTG_STATUS, OTG, inst),\
- SRI(OTG_STATUS_POSITION, OTG, inst),\
- SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
- SRI(OTG_M_CONST_DTO0, OTG, inst),\
- SRI(OTG_M_CONST_DTO1, OTG, inst),\
- SRI(OTG_CLOCK_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
- SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
- SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
- SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
- SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
- SRI(CONTROL, VTG, inst),\
- SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
- SRI(OTG_GSL_CONTROL, OTG, inst),\
- SRI(OTG_CRC_CNTL, OTG, inst),\
- SRI(OTG_CRC0_DATA_RG, OTG, inst),\
- SRI(OTG_CRC0_DATA_B, OTG, inst),\
- SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
- SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
- SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
- SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
- SR(GSL_SOURCE_SELECT),\
- SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
- SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
- SRI(OTG_GSL_WINDOW_X, OTG, inst),\
- SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
- SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
- SRI(OTG_DSC_START_POSITION, OTG, inst),\
- SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\
- SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\
- SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
- SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
- SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
- SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
- SRI(OTG_DRR_CONTROL, OTG, inst)
-
#define OPTC_COMMON_MASK_SH_LIST_DCN3_2(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 74e50c09bb62..633491331722 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -657,8 +657,6 @@ static const struct resource_caps res_cap_dcn32 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -726,6 +724,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
.disable_unbounded_requesting = false,
+ .override_dispclk_programming = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1506,8 +1505,11 @@ static void dcn32_resource_destruct(struct dcn32_resource_pool *pool)
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
- if (pool->base.oem_device != NULL)
- link_destroy_ddc_service(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
@@ -1611,7 +1613,6 @@ bool dcn32_acquire_post_bldn_3dlut(
struct dc_transfer_func **shaper)
{
bool ret = false;
- union dc_3dlut_state *state;
ASSERT(*lut == NULL && *shaper == NULL);
*lut = NULL;
@@ -1620,7 +1621,6 @@ bool dcn32_acquire_post_bldn_3dlut(
if (!res_ctx->is_mpc_3dlut_acquired[mpcc_id]) {
*lut = pool->mpc_lut[mpcc_id];
*shaper = pool->mpc_shaper[mpcc_id];
- state = &pool->mpc_lut[mpcc_id]->state;
res_ctx->is_mpc_3dlut_acquired[mpcc_id] = true;
ret = true;
}
@@ -1913,8 +1913,8 @@ int dcn32_populate_dml_pipes_from_context(
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool subvp_in_use = false;
- uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
struct dc_crtc_timing *timing;
+ bool vsr_odm_support = false;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
@@ -1932,12 +1932,15 @@ int dcn32_populate_dml_pipes_from_context(
timing = &pipe->stream->timing;
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 &&
+ res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width);
if (context->stream_count == 1 &&
context->stream_status[0].plane_count == 1 &&
!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
- dc->debug.enable_single_display_2to1_odm_policy) {
+ dc->debug.enable_single_display_2to1_odm_policy &&
+ !vsr_odm_support) { // exclude 2to1 ODM combine on >= 5k VSR
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
pipe_cnt++;
@@ -2002,7 +2005,7 @@ int dcn32_populate_dml_pipes_from_context(
}
DC_FP_START();
- is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
+ dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
DC_FP_END();
pipe_cnt++;
@@ -2455,7 +2458,7 @@ static bool dcn32_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 3a2d7bcc4b6d..47fa51c1d3f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -59,25 +59,21 @@ uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
- uint32_t cursor_bpp = 4;
uint32_t cursor_mall_size_bytes = 0;
switch (pipe_ctx->stream->cursor_attributes.color_format) {
case CURSOR_MODE_MONO:
cursor_size /= 2;
- cursor_bpp = 4;
break;
case CURSOR_MODE_COLOR_1BIT_AND:
case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
cursor_size *= 4;
- cursor_bpp = 4;
break;
case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
cursor_size *= 8;
- cursor_bpp = 8;
break;
}
@@ -261,6 +257,8 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
return psr_capable;
}
+#define DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER 7
+
/**
* *******************************************************************************************
* dcn32_determine_det_override: Determine DET allocation for each pipe
@@ -272,7 +270,6 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
* If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
* number of DET for that given plane will be split among the pipes driving that plane.
*
- *
* High level algorithm:
* 1. Split total DET among number of streams
* 2. For each stream, split DET among the planes
@@ -280,6 +277,18 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
* among those pipes.
* 4. Assign the DET override to the DML pipes.
*
+ * Special cases:
+ *
+ * For two displays that have a large difference in pixel rate, we may experience
+ * underflow on the larger display when we divide the DET equally. For this case,
+ * we implement a modified algorithm to assign more DET to the larger display.
+ *
+ * 1. Calculate the difference in pixel rates (multiplier) between the two displays
+ * 2. If the multiplier meets or exceeds DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER,
+ * use the modified DET override algorithm.
+ * 3. Assign a smaller DET size to the lower pixel rate display and a higher DET
+ * size to the higher pixel rate display
+ *
* @param [in]: dc: Current DC state
* @param [in]: context: New DC state to be programmed
* @param [in]: pipes: Array of DML pipes
@@ -299,18 +308,46 @@ void dcn32_determine_det_override(struct dc *dc,
struct dc_plane_state *current_plane = NULL;
uint8_t stream_count = 0;
+ int phy_pix_clk_mult, lower_mode_stream_index;
+ int phy_pix_clk[MAX_PIPES] = {0};
+ bool use_new_det_override_algorithm = false;
+
for (i = 0; i < context->stream_count; i++) {
/* Don't count SubVP streams for DET allocation */
if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+ phy_pix_clk[i] = context->streams[i]->phy_pix_clk;
stream_count++;
}
}
+ /* Check for special case with two displays, one with much higher pixel rate */
+ if (stream_count == 2) {
+ ASSERT((phy_pix_clk[0] > 0) && (phy_pix_clk[1] > 0));
+ if (phy_pix_clk[0] < phy_pix_clk[1]) {
+ lower_mode_stream_index = 0;
+ phy_pix_clk_mult = phy_pix_clk[1] / phy_pix_clk[0];
+ } else {
+ lower_mode_stream_index = 1;
+ phy_pix_clk_mult = phy_pix_clk[0] / phy_pix_clk[1];
+ }
+
+ if (phy_pix_clk_mult >= DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER)
+ use_new_det_override_algorithm = true;
+ }
+
if (stream_count > 0) {
stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
continue;
+
+ if (use_new_det_override_algorithm) {
+ if (i == lower_mode_stream_index)
+ stream_segments = 4;
+ else
+ stream_segments = 14;
+ }
+
if (context->stream_status[i].plane_count > 0)
plane_segments = stream_segments / context->stream_status[i].plane_count;
else
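dcn32_determine_det_override() splits 18 DET segments evenly per stream and then per plane; the new special case instead assigns 4 segments to the lower pixel rate display and 14 to the other once their rates differ by DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER (7x) or more. The split decision, isolated into a sketch for the two-stream case:

#include <assert.h>

#define TOTAL_DET_SEGMENTS 18
#define MIN_MULTIPLIER      7 /* DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER */

static void split_det_segments(int clk0_khz, int clk1_khz, int *seg0, int *seg1)
{
	int lo = clk0_khz < clk1_khz ? clk0_khz : clk1_khz;
	int hi = clk0_khz < clk1_khz ? clk1_khz : clk0_khz;

	assert(lo > 0);
	if (hi / lo >= MIN_MULTIPLIER) {
		/* skewed 4/14 split: the fast display needs the extra DET */
		*seg0 = (clk0_khz == lo) ? 4 : 14;
		*seg1 = TOTAL_DET_SEGMENTS - *seg0;
	} else {
		/* default: even 9/9 split between the two streams */
		*seg0 = *seg1 = TOTAL_DET_SEGMENTS / 2;
	}
}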
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 55f918b44077..c6a0e84885a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -655,8 +655,6 @@ static const struct resource_caps res_cap_dcn321 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
- .blends_with_above = true,
- .blends_with_below = true,
.per_pixel_alpha = true,
.pixel_format_support = {
@@ -724,6 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
.disable_unbounded_requesting = false,
+ .override_dispclk_programming = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1491,8 +1490,11 @@ static void dcn321_resource_destruct(struct dcn321_resource_pool *pool)
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
- if (pool->base.oem_device != NULL)
- link_destroy_ddc_service(&pool->base.oem_device);
+ if (pool->base.oem_device != NULL) {
+ struct dc *dc = pool->base.oem_device->ctx->dc;
+
+ dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
+ }
}
@@ -1996,7 +1998,7 @@ static bool dcn321_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
+ pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 9d0f79dff2e3..01db035589c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -129,7 +129,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)
DML = calcs/dce_calcs.o calcs/custom_float.o calcs/bw_fixed.o
-ifdef CONFIG_DRM_AMD_DC_DCN
+ifdef CONFIG_DRM_AMD_DC_FP
DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
DML += dcn10/dcn10_fpu.o
DML += dcn20/dcn20_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index d3ba65efe1d2..38d1f2be8cf3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -938,7 +938,7 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
@@ -973,7 +973,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
- bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
+ int minimum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minimum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
if (dc_extended_blank_supported(dc)) {
@@ -1340,7 +1341,7 @@ int dcn20_populate_dml_pipes_from_context(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
pipes[pipe_cnt].dout.output_type = dm_dp;
- if (link_is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i]))
+ if (dc->link_srv->dp_is_128b_132b_signal(&res_ctx->pipe_ctx[i]))
pipes[pipe_cnt].dout.output_type = dm_dp2p0;
break;
case SIGNAL_TYPE_EDP:
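decide_zstate_support() above now gates Z8 on a tunable residency floor instead of the hard-coded 1000 us: Z8 is allowed only when the DML stutter period exceeds dc->debug.minimum_z8_residency_time, with 1000 us as the fallback when the debug value is unset (DCN3.14 sets 3080 us earlier in this series). The check reduces to:

#include <stdbool.h>

/* times in microseconds */
static bool allow_z8(double stutter_period_us, int min_z8_residency_us)
{
	int floor_us = min_z8_residency_us > 0 ? min_z8_residency_us : 1000;

	return stutter_period_us > (double)floor_us;
}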
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index c3d75e56410c..d0303173ce80 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -23,9 +23,7 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN
#include "dc.h"
-#include "dc_link.h"
#include "../display_mode_lib.h"
#include "display_mode_vba_30.h"
#include "../dml_inline_defs.h"
@@ -6635,4 +6633,3 @@ static noinline_for_stack void UseMinimumDCFCLK(
}
}
-#endif /* CONFIG_DRM_AMD_DC_DCN */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 8179be1f34bb..cd3cfcb2a2b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -23,8 +23,6 @@
*
*/
-#ifdef CONFIG_DRM_AMD_DC_DCN
-
#include "../display_mode_lib.h"
#include "../display_mode_vba.h"
#include "../dml_inline_defs.h"
@@ -1792,4 +1790,3 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
}
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 27f488405335..536a63624595 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -24,7 +24,6 @@
*/
#include "dc.h"
-#include "dc_link.h"
#include "../display_mode_lib.h"
#include "../dcn30/display_mode_vba_30.h"
#include "display_mode_vba_31.h"
@@ -4308,11 +4307,11 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
- } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
+ } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_dp2p0) {
if (v->DSCEnable[k] == true) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
- if (v->Output[k] == dm_dp) {
+ if (v->Output[k] == dm_dp || v->Output[k] == dm_dp2p0) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
@@ -4320,107 +4319,201 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
} else {
v->RequiresDSC[i][k] = false;
v->LinkDSCEnable = false;
- v->RequiresFEC[i][k] = false;
- }
-
- v->Outbpp = BPP_INVALID;
- if (v->PHYCLKPerState[i] >= 270.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 2700,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 5400,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 8100,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 10000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 10000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "10x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->RequiresFEC[i][k] = true;
+ } else {
+ v->RequiresFEC[i][k] = false;
+ }
}
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 12000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- 12000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "12x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->Outbpp = BPP_INVALID;
+ if ((v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr10) &&
+ v->PHYCLKD18PerState[k] >= 10000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 13500.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR10"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5) &&
+ v->PHYCLKD18PerState[k] >= 13500.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 20000.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR13p5"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr20) &&
+ v->PHYCLKD18PerState[k] >= 20000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->DSCEnable[k] == true &&
+ v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR20"
+ }
+ } else {
+ v->Outbpp = BPP_INVALID;
+ if (v->PHYCLKPerState[i] >= 270.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 2700,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 5400,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 8100,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
+ }
}
}
} else {
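The replacement hunk above adds a DP 2.x (dm_dp2p0) branch to the dml31 mode-support ladder: for each of UHBR10, UHBR13.5 and UHBR20 it checks that PHYCLKD18 (which, by its use here, appears to be the PHY clock divided by 18) reaches rate/18, computes the output bpp, and retries once with DSC forced on before escalating to the next rate. The ladder, sketched with a caller-supplied validity check standing in for TruncToValidBPP():

#include <stdbool.h>

/* returns the lowest UHBR rate (Mbps per lane) that yields a valid bpp, else 0 */
static int pick_uhbr_rate_mbps(double phyclk_d18_mhz, bool (*bpp_valid)(int rate_mbps))
{
	static const int rates[] = { 10000, 13500, 20000 };

	for (int i = 0; i < 3; i++)
		if (phyclk_d18_mhz >= rates[i] / 18.0 && bpp_valid(rates[i]))
			return rates[i];
	return 0; /* BPP_INVALID at every reachable rate */
}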
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 35d10b4d018b..2244e4fb8c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -902,7 +902,6 @@ static void dml_rq_dlg_get_dlg_params(
double hratio_c;
double vratio_l;
double vratio_c;
- bool scl_enable;
unsigned int swath_width_ub_l;
unsigned int dpte_groups_per_row_ub_l;
@@ -1020,7 +1019,6 @@ static void dml_rq_dlg_get_dlg_params(
hratio_c = scl->hscl_ratio_c;
vratio_l = scl->vscl_ratio;
vratio_c = scl->vscl_ratio_c;
- scl_enable = scl->scl_enable;
swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index acda3e1babd4..c52b76610bd2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -308,6 +308,10 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width))
upscaled = true;
+ /* Apply HostVM policy - enable HostVM when either the hypervisor is globally enabled or the rIOMMU is active */
+ if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE)
+ pipes[i].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled || dc->res_pool->hubbub->riommu_active;
+
/*
* Immediate flip can be set dynamically after enabling the plane.
* We need to require support for immediate flip or underflow can be
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index c843b394aeb4..daf319370190 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -27,7 +27,6 @@
#define UNIT_TEST 0
#if !UNIT_TEST
#include "dc.h"
-#include "dc_link.h"
#endif
#include "../display_mode_lib.h"
#include "display_mode_vba_314.h"
@@ -4406,11 +4405,11 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->AudioSampleRate[k],
v->AudioSampleLayout[k],
v->ODMCombineEnablePerState[i][k]);
- } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
+ } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_dp2p0) {
if (v->DSCEnable[k] == true) {
v->RequiresDSC[i][k] = true;
v->LinkDSCEnable = true;
- if (v->Output[k] == dm_dp) {
+ if (v->Output[k] == dm_dp || v->Output[k] == dm_dp2p0) {
v->RequiresFEC[i][k] = true;
} else {
v->RequiresFEC[i][k] = false;
@@ -4418,107 +4417,201 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
} else {
v->RequiresDSC[i][k] = false;
v->LinkDSCEnable = false;
- v->RequiresFEC[i][k] = false;
- }
-
- v->Outbpp = BPP_INVALID;
- if (v->PHYCLKPerState[i] >= 270.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 2700,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 5400,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 8100,
- v->OutputLinkDPLanes[k],
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- // TODO: Need some other way to handle this nonsense
- // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
- }
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 10000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- (1.0 - v->Downspreading / 100.0) * 10000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "10x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->RequiresFEC[i][k] = true;
+ } else {
+ v->RequiresFEC[i][k] = false;
+ }
}
- if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[i] >= 12000.0 / 18) {
- v->Outbpp = TruncToValidBPP(
- 12000,
- 4,
- v->HTotal[k],
- v->HActive[k],
- v->PixelClockBackEnd[k],
- v->ForcedOutputLinkBPP[k],
- v->LinkDSCEnable,
- v->Output[k],
- v->OutputFormat[k],
- v->DSCInputBitPerComponent[k],
- v->NumberOfDSCSlices[k],
- v->AudioSampleRate[k],
- v->AudioSampleLayout[k],
- v->ODMCombineEnablePerState[i][k]);
- v->OutputBppPerState[i][k] = v->Outbpp;
- //v->OutputTypeAndRatePerState[i][k] = v->Output[k] & "12x4";
+ if (v->Output[k] == dm_dp2p0) {
+ v->Outbpp = BPP_INVALID;
+ if ((v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr10) &&
+ v->PHYCLKD18PerState[k] >= 10000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 13500.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 10000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR10"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr13p5) &&
+ v->PHYCLKD18PerState[k] >= 13500.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKD18PerState[k] < 20000.0 / 18.0 &&
+ v->DSCEnable[k] == true && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 13500,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR13p5"
+ }
+ if (v->Outbpp == BPP_INVALID &&
+ (v->OutputLinkDPRate[k] == dm_dp_rate_na || v->OutputLinkDPRate[k] == dm_dp_rate_uhbr20) &&
+ v->PHYCLKD18PerState[k] >= 20000.0 / 18.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->DSCEnable[k] == true &&
+ v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 20000,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " UHBR20"
+ }
+ } else {
+ v->Outbpp = BPP_INVALID;
+ if (v->PHYCLKPerState[i] >= 270.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 2700,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 5400,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 8100,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
+ }
}
}
} else {
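For orientation, the dm_dp2p0 branch added above walks a fixed ladder of DP2.0 link rates (UHBR10, UHBR13.5, UHBR20). Each tier is tried only when the requested link rate is unpinned (dm_dp_rate_na) or matches the tier and PHYCLKD18PerState can reach it; if the non-DSC bpp comes back BPP_INVALID, DSC is allowed, no link bpp is forced, and no higher tier is reachable, the same tier is retried with DSC enabled. A condensed sketch of that control flow (try_rate() is a hypothetical helper standing in for the inlined TruncToValidBPP() calls, which also apply the downspreading derating; the OutputLinkDPRate pinning is omitted):

	static double pick_dp2p0_outbpp(struct vba_vars_st *v, int i, int k)
	{
		static const double rate_mbps[] = { 10000.0, 13500.0, 20000.0 };
		double outbpp = BPP_INVALID;
		int r;

		for (r = 0; r < 3 && outbpp == BPP_INVALID; r++) {
			if (v->PHYCLKD18PerState[k] < rate_mbps[r] / 18.0)
				break;	/* PHY cannot reach this tier or any above */
			outbpp = try_rate(v, i, k, rate_mbps[r], false);
			/* retry with DSC only when no higher rate is reachable */
			if (outbpp == BPP_INVALID && v->DSCEnable[k] &&
			    v->ForcedOutputLinkBPP[k] == 0 &&
			    (r == 2 || v->PHYCLKD18PerState[k] < rate_mbps[r + 1] / 18.0)) {
				v->RequiresDSC[i][k] = true;
				v->LinkDSCEnable = true;
				outbpp = try_rate(v, i, k, rate_mbps[r], true);
			}
		}
		return outbpp;
	}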
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index e47828e3b6d5..6b29d3a9520f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1270,7 +1270,7 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
@@ -2315,6 +2315,9 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
num_dcfclk_dpms++;
}
+ if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
+ min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
+
if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz)
return -1;
@@ -2423,7 +2426,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].fabricclk_mhz < min_fclk_mhz) {
table[i].fabricclk_mhz = min_fclk_mhz;
- break;
}
}
}
@@ -2432,7 +2434,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
table[i].dcfclk_mhz = min_dcfclk_mhz;
- break;
}
}
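A note on the two break removals in build_synthetic_soc_states() above (mirrored in dcn321_fpu.c further below): the loops scan the synthetic state table from the top down and raise entries that sit below the newly computed floor. With the break, only the first low entry encountered was clamped; without it every low entry is clamped, keeping the table monotonic. Illustrated with hypothetical values:

	/* min_fclk_mhz = 650, fabricclk entries {400, 600, 800}:
	 * the old loop clamped only 600 -> 650 and stopped, leaving 400;
	 * the fixed loop yields {650, 650, 800}.
	 */
	for (i = *num_entries - 1; i >= 0; i--)
		if (table[i].fabricclk_mhz < min_fclk_mhz)
			table[i].fabricclk_mhz = min_fclk_mhz;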
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 3b2a014ccf8f..f74730c2abbd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -24,7 +24,6 @@
*/
#include "dc.h"
-#include "dc_link.h"
#include "../display_mode_lib.h"
#include "display_mode_vba_32.h"
#include "../dml_inline_defs.h"
@@ -690,7 +689,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
mode_lib->vba.PixelClock,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
- mode_lib->vba.UsesMALLForPStateChange);
+ mode_lib->vba.UsesMALLForPStateChange,
+ mode_lib->vba.UseUnboundedRequesting);
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
v->MaxVStartupLines[k] = ((mode_lib->vba.Interlace[k] &&
@@ -3216,7 +3216,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PixelClock,
mode_lib->vba.VRatio,
mode_lib->vba.VRatioChroma,
- mode_lib->vba.UsesMALLForPStateChange);
+ mode_lib->vba.UsesMALLForPStateChange,
+ mode_lib->vba.UseUnboundedRequesting);
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.VMDataOnlyReturnBWPerState = dml32_get_return_bw_mbps_vm_only(&mode_lib->vba.soc, i,
mode_lib->vba.DCFCLKState[i][j], mode_lib->vba.FabricClockPerState[i],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index d1000aa4c481..61cc4904ade4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -6271,7 +6271,8 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double PixelClock[],
double VRatioY[],
double VRatioC[],
- enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[])
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
+ enum unbounded_requesting_policy UseUnboundedRequesting)
{
int k;
double SwathSizeAllSurfaces = 0;
@@ -6283,6 +6284,9 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];
bool NotEnoughDETSwathFillLatencyHiding = false;
+ if (UseUnboundedRequesting == dm_unbounded_requesting)
+ return false;
+
/* calculate sum of single swath size for all pipes in bytes */
for (k = 0; k < NumberOfActiveSurfaces; k++) {
SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 9ba792c633a5..592d174df6c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -1163,6 +1163,7 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double PixelClock[],
double VRatioY[],
double VRatioC[],
- enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[]);
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
+ enum unbounded_requesting_policy UseUnboundedRequesting);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index b80cef70fa60..57b9bd896678 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -294,6 +294,9 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
num_dcfclk_dpms++;
}
+ if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
+ min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
+
if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz)
return -1;
@@ -402,7 +405,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].fabricclk_mhz < min_fclk_mhz) {
table[i].fabricclk_mhz = min_fclk_mhz;
- break;
}
}
}
@@ -411,7 +413,6 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
for (i = *num_entries - 1; i >= 0 ; i--) {
if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
table[i].dcfclk_mhz = min_dcfclk_mhz;
- break;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index d52cbc0e9b67..2bdc47615543 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -47,6 +47,59 @@ static bool dsc_policy_disable_dsc_stream_overhead;
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ if (timing->flags.DSC)
+ return dc_dsc_stream_bandwidth_in_kbps(timing,
+ timing->dsc_cfg.bits_per_pixel,
+ timing->dsc_cfg.num_slices_h,
+ timing->dsc_cfg.is_dp);
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ ASSERT(bits_per_channel != 0);
+ bits_per_channel = 8;
+ break;
+ }
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /* Only Y-only reduces bandwidth to 1/3 of RGB */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+}
+
+
/* Forward Declarations */
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
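A quick sanity check of the arithmetic in dc_bandwidth_in_kbps_from_timing() with a hypothetical 4k60 RGB 8 bpc timing (594 MHz pixel clock, not taken from this patch):

	/* pix_clk_100hz = 5940000 (594 MHz)
	 * kbps = 5940000 / 10 * 8 = 4752000   (one 8-bit channel)
	 *      -> * 3 = 14256000 kbps         (== 594 MHz * 24 bpp, RGB)
	 * YCbCr 4:2:0 halves this to 7128000 kbps, 4:2:2 gives
	 * 14256000 * 2 / 3 = 9504000 kbps, and Y-only keeps the
	 * single-channel 4752000 kbps because the "* 3" step is skipped.
	 */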
@@ -79,8 +132,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
- int min_slice_height_override,
- int max_dsc_target_bpp_limit_override_x16,
+ const struct dc_dsc_config_options *options,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -352,6 +404,11 @@ bool dc_dsc_compute_bandwidth_range(
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config;
+ struct dc_dsc_config_options options = {0};
+
+ options.dsc_min_slice_height_override = dsc_min_slice_height_override;
+ options.max_target_bpp_limit_override_x16 = max_bpp_x16;
+ options.slice_height_granularity = 1;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
@@ -360,7 +417,7 @@ bool dc_dsc_compute_bandwidth_range(
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- dsc_min_slice_height_override, max_bpp_x16, &config);
+ &options, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
@@ -740,8 +797,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
- int min_slice_height_override,
- int max_dsc_target_bpp_limit_override_x16,
+ const struct dc_dsc_config_options *options,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -760,7 +816,7 @@ static bool setup_dsc_config(
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
- dc_dsc_get_policy_for_timing(timing, max_dsc_target_bpp_limit_override_x16, &policy);
+ dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy);
pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
@@ -909,12 +965,13 @@ static bool setup_dsc_config(
// Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by.
// For 4:2:0 make sure the slice height is divisible by 2 as well.
- if (min_slice_height_override == 0)
+ if (options->dsc_min_slice_height_override == 0)
slice_height = min(policy.min_slice_height, pic_height);
else
- slice_height = min(min_slice_height_override, pic_height);
+ slice_height = min((int)(options->dsc_min_slice_height_override), pic_height);
while (slice_height < pic_height && (pic_height % slice_height != 0 ||
+ slice_height % options->slice_height_granularity != 0 ||
(timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0)))
slice_height++;
@@ -958,8 +1015,7 @@ done:
bool dc_dsc_compute_config(
const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
- uint32_t dsc_min_slice_height_override,
- uint32_t max_target_bpp_limit_override,
+ const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg)
@@ -971,8 +1027,7 @@ bool dc_dsc_compute_config(
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, dsc_min_slice_height_override,
- max_target_bpp_limit_override * 16, dsc_cfg);
+ timing, options, dsc_cfg);
return is_dsc_possible;
}
@@ -1104,3 +1159,10 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable)
{
dsc_policy_disable_dsc_stream_overhead = disable;
}
+
+void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
+{
+ options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override;
+ options->max_target_bpp_limit_override_x16 = 0;
+ options->slice_height_granularity = 1;
+}
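The net effect of the dc_dsc.c changes is that callers now bundle the former loose override parameters into a struct dc_dsc_config_options. A minimal caller-side sketch using the interfaces added above (dc, dsc, dsc_sink_caps, target_bandwidth_kbps and timing are assumed to be supplied by the caller; the 12 bpp cap is an arbitrary example):

	struct dc_dsc_config dsc_cfg;
	struct dc_dsc_config_options options;

	dc_dsc_get_default_config_option(dc, &options);
	options.max_target_bpp_limit_override_x16 = 12 << 4; /* 12.0 bpp, x16 fixed point */

	if (dc_dsc_compute_config(dsc, dsc_sink_caps, &options,
				  target_bandwidth_kbps, timing, &dsc_cfg))
		/* dsc_cfg now holds a valid slice/bpp configuration */;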
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index e97cf09be9d5..64cee8c80110 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -39,6 +39,7 @@
*/
void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
{
+#if defined(CONFIG_DRM_AMD_DC_FP)
enum colour_mode mode;
enum bits_per_comp bpc;
bool is_navite_422_or_420;
@@ -59,4 +60,5 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
slice_width, slice_height,
pps->dsc_version_minor);
DC_FP_END();
+#endif
}
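The new CONFIG_DRM_AMD_DC_FP guard in calc_rc_params() follows dc's usual convention for floating-point math in kernel context, where FPU use must be explicitly bracketed. A sketch of the pattern (illustrative only; DC_FP_START/DC_FP_END are the existing dc helpers):

	#if defined(CONFIG_DRM_AMD_DC_FP)
		DC_FP_START();	/* save FPU state so float ops are legal here */
		/* ... floating-point rate-control computation ... */
		DC_FP_END();	/* restore FPU state */
	#endif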
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index e1422e5e86c9..25ffc052d53b 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -27,7 +27,7 @@
#include "dm_services.h"
#include "dm_helpers.h"
-#include "include/hdcp_types.h"
+#include "include/hdcp_msg_types.h"
#include "include/signal_types.h"
#include "core_types.h"
#include "link.h"
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index ed3c03108da6..2eb597a24425 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -51,9 +51,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
#include "clock_source.h"
#include "audio.h"
#include "dm_pp_smu.h"
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "dm_cp_psp.h"
-#endif
#include "link_hwss.h"
/********** DAL Core*********************/
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 131fcfa28bca..f4aa76e02518 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -70,28 +70,38 @@ struct dpp_input_csc_matrix {
};
static const struct dpp_input_csc_matrix __maybe_unused dpp_input_csc_matrix[] = {
- {COLOR_SPACE_SRGB,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_SRGB_LIMITED,
- {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- {COLOR_SPACE_YCBCR601,
- {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
- 0, 0x2000, 0x38b4, 0xe3a6} },
- {COLOR_SPACE_YCBCR601_LIMITED,
- {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
- 0, 0x2568, 0x40de, 0xdd3a} },
- {COLOR_SPACE_YCBCR709,
- {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
- 0x2000, 0x3b61, 0xe24f} },
- {COLOR_SPACE_YCBCR709_LIMITED,
- {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
- 0x2568, 0x43ee, 0xdbb2} },
- {COLOR_SPACE_2020_YCBCR,
- {0x2F30, 0x2000, 0, 0xE869, 0xEDB7, 0x2000, 0xFABC, 0xBC6, 0,
- 0x2000, 0x3C34, 0xE1E6} },
- {COLOR_SPACE_2020_RGB_LIMITEDRANGE,
- {0x35E0, 0x255F, 0, 0xE2B3, 0xEB20, 0x255F, 0xF9FD, 0xB1E, 0,
- 0x255F, 0x44BD, 0xDB43} }
+ { COLOR_SPACE_SRGB,
+ { 0x2000, 0, 0, 0,
+ 0, 0x2000, 0, 0,
+ 0, 0, 0x2000, 0 } },
+ { COLOR_SPACE_SRGB_LIMITED,
+ { 0x2000, 0, 0, 0,
+ 0, 0x2000, 0, 0,
+ 0, 0, 0x2000, 0 } },
+ { COLOR_SPACE_YCBCR601,
+ { 0x2cdd, 0x2000, 0, 0xe991,
+ 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0, 0x2000, 0x38b4, 0xe3a6 } },
+ { COLOR_SPACE_YCBCR601_LIMITED,
+ { 0x3353, 0x2568, 0, 0xe400,
+ 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0, 0x2568, 0x40de, 0xdd3a } },
+ { COLOR_SPACE_YCBCR709,
+ { 0x3265, 0x2000, 0, 0xe6ce,
+ 0xf105, 0x2000, 0xfa01, 0xa7d,
+ 0, 0x2000, 0x3b61, 0xe24f } },
+ { COLOR_SPACE_YCBCR709_LIMITED,
+ { 0x39a6, 0x2568, 0, 0xe0d6,
+ 0xeedd, 0x2568, 0xf925, 0x9a8,
+ 0, 0x2568, 0x43ee, 0xdbb2 } },
+ { COLOR_SPACE_2020_YCBCR,
+ { 0x2F30, 0x2000, 0, 0xE869,
+ 0xEDB7, 0x2000, 0xFABC, 0xBC6,
+ 0, 0x2000, 0x3C34, 0xE1E6 } },
+ { COLOR_SPACE_2020_RGB_LIMITEDRANGE,
+ { 0x35E0, 0x255F, 0, 0xE2B3,
+ 0xEB20, 0x255F, 0xF9FD, 0xB1E,
+ 0, 0x255F, 0x44BD, 0xDB43 } }
};
struct dpp_grph_csc_adjustment {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index b982be64c792..86b711dcc785 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -53,9 +53,7 @@ enum dwb_source {
/* DCN1.x, DCN2.x support 2 pipes */
enum dwb_pipe {
dwb_pipe0 = 0,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
dwb_pipe1,
-#endif
dwb_pipe_max_num,
};
@@ -72,14 +70,11 @@ enum wbscl_coef_filter_type_sel {
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dwb_boundary_mode {
DWBSCL_BOUNDARY_MODE_EDGE = 0,
DWBSCL_BOUNDARY_MODE_BLACK = 1
};
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dwb_output_csc_mode {
DWB_OUTPUT_CSC_DISABLE = 0,
DWB_OUTPUT_CSC_COEF_A = 1,
@@ -132,7 +127,6 @@ struct dwb_efc_display_settings {
unsigned int dwbOutputBlack; // 0 - Normal, 1 - Output Black
};
-#endif
struct dwb_warmup_params {
bool warmup_en; /* false: normal mode, true: enable pattern generator */
bool warmup_mode; /* false: 420, true: 444 */
@@ -208,7 +202,7 @@ struct dwbc_funcs {
struct dwb_warmup_params *warmup_params);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
void (*dwb_program_output_csc)(
struct dwbc *dwbc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index d5ea7545583e..b5d353c41aa9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -146,7 +146,7 @@ struct hubp_funcs {
void (*set_blank)(struct hubp *hubp, bool blank);
void (*set_blank_regs)(struct hubp *hubp, bool blank);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_hubp_post_enable)(struct hubp *hubp);
#endif
void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index a819f0f97c5f..b95ae9596c3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -275,20 +275,6 @@ enum dc_lut_mode {
LUT_RAM_B
};
-enum symclk_state {
- SYMCLK_OFF_TX_OFF,
- SYMCLK_ON_TX_ON,
- SYMCLK_ON_TX_OFF,
-};
-
-struct phy_state {
- struct {
- uint8_t otg : 1;
- uint8_t reserved : 7;
- } symclk_ref_cnts;
- enum symclk_state symclk_state;
-};
-
/**
* speakersToChannels
*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index ec572a9e4054..dbe7afa9d3a2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -75,58 +75,6 @@ struct encoder_feature_support {
bool fec_supported;
};
-union dpcd_psr_configuration {
- struct {
- unsigned char ENABLE : 1;
- unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
- unsigned char CRC_VERIFICATION : 1;
- unsigned char FRAME_CAPTURE_INDICATION : 1;
- /* For eDP 1.4, PSR v2*/
- unsigned char LINE_CAPTURE_INDICATION : 1;
- /* For eDP 1.4, PSR v2*/
- unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
- unsigned char ENABLE_PSR2 : 1;
- /* For eDP 1.5, PSR v2 w/ early transport */
- unsigned char EARLY_TRANSPORT_ENABLE : 1;
- } bits;
- unsigned char raw;
-};
-
-union dpcd_alpm_configuration {
- struct {
- unsigned char ENABLE : 1;
- unsigned char IRQ_HPD_ENABLE : 1;
- unsigned char RESERVED : 6;
- } bits;
- unsigned char raw;
-};
-
-union dpcd_sink_active_vtotal_control_mode {
- struct {
- unsigned char ENABLE : 1;
- unsigned char RESERVED : 7;
- } bits;
- unsigned char raw;
-};
-
-union psr_error_status {
- struct {
- unsigned char LINK_CRC_ERROR :1;
- unsigned char RFB_STORAGE_ERROR :1;
- unsigned char VSC_SDP_ERROR :1;
- unsigned char RESERVED :5;
- } bits;
- unsigned char raw;
-};
-
-union psr_sink_psr_status {
- struct {
- unsigned char SINK_SELF_REFRESH_STATUS :3;
- unsigned char RESERVED :5;
- } bits;
- unsigned char raw;
-};
-
struct link_encoder {
const struct link_encoder_funcs *funcs;
int32_t aux_channel_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index bb5ad70d4266..c4fbbf08ef86 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -30,7 +30,6 @@
#include "audio_types.h"
#include "hw_shared.h"
-#include "dc_link.h"
struct dc_bios;
struct dc_context;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 1d9f9c53d2bd..c21e7ffd5bd0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -182,7 +182,7 @@ struct timing_generator_funcs {
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
void (*phantom_crtc_post_enable)(struct timing_generator *tg);
#endif
void (*disable_phantom_crtc)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index a4d61bb724b6..45d37c584551 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -148,7 +148,7 @@ struct hwseq_private_funcs {
void (*PLAT_58856_wa)(struct dc_state *context,
struct pipe_ctx *pipe_ctx);
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
-#ifdef CONFIG_DRM_AMD_DC_DCN
+#ifdef CONFIG_DRM_AMD_DC_FP
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
void (*subvp_update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index e70fa0059223..11aaa7a9518a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -28,17 +28,57 @@
/* FILE POLICY AND INTENDED USAGE:
*
- * This header declares link functions exposed to dc. All functions must have
- * "link_" as prefix. For example link_run_my_function. This header is strictly
- * private in dc and should never be included in other header files. dc
- * components should include this header in their .c files in order to access
- * functions in link folder. This file should never include any header files in
- * link folder. If there is a need to expose a function declared in one of
- * header files in side link folder, you need to move the function declaration
- * into this file and prefix it with "link_".
+ * This header defines the link component function interfaces, a.k.a.
+ * link_service. link_service provides the only entry point to link functions,
+ * in a function-pointer style. This header is strictly private in dc and
+ * should never be included by DM, because it exposes too much dc detail,
+ * including all the dc private types defined in core_types.h. Including it
+ * would break DM - DC encapsulation and turn DM into a maintenance nightmare.
+ *
+ * The following shows a link component relation map.
+ *
+ * DM to DC:
+ * DM includes dc.h
+ * dc_link_exports.c or other dc files implement dc.h
+ *
+ * DC to Link:
+ * dc_link_exports.c or other dc files include link.h
+ * link_factory.c implements link.h
+ *
+ * Link sub-component to Link sub-component:
+ * link_factory.c includes --> link_xxx.h
+ * link_xxx.c implements link_xxx.h
+ *
+ * As you can see, adding a new dc link function and calling it on the DM/dc
+ * side is difficult, because it requires layers of translation. The most
+ * appropriate way to implement a new requirement on the DM/dc side is to
+ * extend or generalize the functionality of the existing link function
+ * interfaces, so that minimal modification is needed outside the link
+ * component. This reduces or even eliminates the effort needed outside the
+ * link component to support a new link feature, and it also reduces code
+ * discrepancy among DMs supporting the same feature: if the full code path
+ * is tested on one version of DM and no feature-specific modification is
+ * required on the others, we can have higher confidence that the feature
+ * will run on other DMs and produce the same result. The following are
+ * some good examples to start with:
+ *
+ * - detect_link --> to add new link detection or capability retrieval routines
+ *
+ * - validate_mode_timing --> to add new timing validation conditions
+ *
+ * - set_dpms_on/set_dpms_off --> to include new link enablement sequences
+ *
+ * If you must add a new link function, you will need to:
+ * 1. Declare the function pointer here under the suitable commented category.
+ * 2. Implement your function in the suitable link_xxx.c file.
+ * 3. Assign the function to link_service in link_factory.c.
+ * 4. NEVER include link_xxx.h headers outside the link component.
+ * 5. NEVER include link.h on the DM side.
*/
#include "core_types.h"
-#include "dc_link.h"
+
+struct link_service *link_create_link_service(void);
+void link_destroy_link_service(struct link_service **link_srv);
struct link_init_data {
const struct dc *dc;
@@ -49,14 +89,6 @@ struct link_init_data {
bool is_dpia_link;
};
-struct dc_link *link_create(const struct link_init_data *init_params);
-void link_destroy(struct dc_link **link);
-
-// TODO - convert any function declarations below to function pointers
-struct gpio *link_get_hpd_gpio(struct dc_bios *dcb,
- struct graphics_object_id link_id,
- struct gpio_service *gpio_service);
-
struct ddc_service_init_data {
struct graphics_object_id id;
struct dc_context *ctx;
@@ -64,94 +96,221 @@ struct ddc_service_init_data {
bool is_dpia_link;
};
-struct ddc_service *link_create_ddc_service(
- struct ddc_service_init_data *ddc_init_data);
+struct link_service {
+ /************************** Factory ***********************************/
+ struct dc_link *(*create_link)(
+ const struct link_init_data *init_params);
+ void (*destroy_link)(struct dc_link **link);
-void link_destroy_ddc_service(struct ddc_service **ddc);
-bool link_is_in_aux_transaction_mode(struct ddc_service *ddc);
+ /************************** Detection *********************************/
+ bool (*detect_link)(struct dc_link *link, enum dc_detect_reason reason);
+ bool (*detect_connection_type)(struct dc_link *link,
+ enum dc_connection_type *type);
+ struct dc_sink *(*add_remote_sink)(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+ void (*remove_remote_sink)(struct dc_link *link, struct dc_sink *sink);
+ bool (*get_hpd_state)(struct dc_link *link);
+ struct gpio *(*get_hpd_gpio)(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service);
+ void (*enable_hpd)(const struct dc_link *link);
+ void (*disable_hpd)(const struct dc_link *link);
+ void (*enable_hpd_filter)(struct dc_link *link, bool enable);
+ bool (*reset_cur_dp_mst_topology)(struct dc_link *link);
+ const struct dc_link_status *(*get_status)(const struct dc_link *link);
+ bool (*is_hdcp1x_supported)(struct dc_link *link,
+ enum signal_type signal);
+ bool (*is_hdcp2x_supported)(struct dc_link *link,
+ enum signal_type signal);
+ void (*clear_dprx_states)(struct dc_link *link);
-bool link_query_ddc_data(
- struct ddc_service *ddc,
- uint32_t address,
- uint8_t *write_buf,
- uint32_t write_size,
- uint8_t *read_buf,
- uint32_t read_size);
+ /*************************** Resource *********************************/
+ void (*get_cur_res_map)(const struct dc *dc, uint32_t *map);
+ void (*restore_res_map)(const struct dc *dc, uint32_t *map);
+ void (*get_cur_link_res)(const struct dc_link *link,
+ struct link_resource *link_res);
-/* Attempt to submit an aux payload, retrying on timeouts, defers, and busy
- * states as outlined in the DP spec. Returns true if the request was
- * successful.
- *
- * NOTE: The function requires explicit mutex on DM side in order to prevent
- * potential race condition. DC components should call the dpcd read/write
- * function in dm_helpers in order to access dpcd safely
- */
-bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
- struct aux_payload *payload);
-uint32_t link_get_aux_defer_delay(struct ddc_service *ddc);
+ /*************************** Validation *******************************/
+ enum dc_status (*validate_mode_timing)(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+ uint32_t (*dp_link_bandwidth_kbps)(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings);
-bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
-enum dp_link_encoding link_dp_get_encoding_format(
- const struct dc_link_settings *link_settings);
+ /*************************** DPMS *************************************/
+ void (*set_dpms_on)(struct dc_state *state, struct pipe_ctx *pipe_ctx);
+ void (*set_dpms_off)(struct pipe_ctx *pipe_ctx);
+ void (*resume)(struct dc_link *link);
+ void (*blank_all_dp_displays)(struct dc *dc);
+ void (*blank_all_edp_displays)(struct dc *dc);
+ void (*blank_dp_stream)(struct dc_link *link, bool hw_init);
+ enum dc_status (*increase_mst_payload)(
+ struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+ enum dc_status (*reduce_mst_payload)(
+ struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+ void (*set_dsc_on_stream)(struct pipe_ctx *pipe_ctx, bool enable);
+ bool (*set_dsc_enable)(struct pipe_ctx *pipe_ctx, bool enable);
+ bool (*update_dsc_config)(struct pipe_ctx *pipe_ctx);
-bool link_decide_link_settings(
- struct dc_stream_state *stream,
- struct dc_link_settings *link_setting);
-
-void link_dp_trace_set_edp_power_timestamp(struct dc_link *link,
- bool power_up);
-uint64_t link_dp_trace_get_edp_poweron_timestamp(struct dc_link *link);
-uint64_t link_dp_trace_get_edp_poweroff_timestamp(struct dc_link *link);
-
-bool link_is_edp_ilr_optimization_required(struct dc_link *link,
- struct dc_crtc_timing *crtc_timing);
-
-bool link_backlight_enable_aux(struct dc_link *link, bool enable);
-void link_edp_add_delay_for_T9(struct dc_link *link);
-bool link_edp_receiver_ready_T9(struct dc_link *link);
-bool link_edp_receiver_ready_T7(struct dc_link *link);
-bool link_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
-bool link_set_sink_vtotal_in_psr_active(const struct dc_link *link,
- uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
-void link_get_psr_residency(const struct dc_link *link, uint32_t *residency);
-enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
-enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
-void link_blank_all_dp_displays(struct dc *dc);
-void link_blank_all_edp_displays(struct dc *dc);
-void link_blank_dp_stream(struct dc_link *link, bool hw_init);
-void link_resume(struct dc_link *link);
-void link_set_dpms_on(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx);
-void link_set_dpms_off(struct pipe_ctx *pipe_ctx);
-void link_dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
-void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
-bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
-bool link_update_dsc_config(struct pipe_ctx *pipe_ctx);
-enum dc_status link_validate_mode_timing(
- const struct dc_stream_state *stream,
+
+ /*************************** DDC **************************************/
+ struct ddc_service *(*create_ddc_service)(
+ struct ddc_service_init_data *ddc_init_data);
+ void (*destroy_ddc_service)(struct ddc_service **ddc);
+ bool (*query_ddc_data)(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size);
+ int (*aux_transfer_raw)(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result);
+ bool (*aux_transfer_with_retries_no_mutex)(struct ddc_service *ddc,
+ struct aux_payload *payload);
+ bool (*is_in_aux_transaction_mode)(struct ddc_service *ddc);
+ uint32_t (*get_aux_defer_delay)(struct ddc_service *ddc);
+
+
+ /*************************** DP Capability ****************************/
+ bool (*dp_is_sink_present)(struct dc_link *link);
+ bool (*dp_is_fec_supported)(const struct dc_link *link);
+ bool (*dp_is_128b_132b_signal)(struct pipe_ctx *pipe_ctx);
+ bool (*dp_get_max_link_enc_cap)(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap);
+ const struct dc_link_settings *(*dp_get_verified_link_cap)(
+ const struct dc_link *link);
+ enum dp_link_encoding (*dp_get_encoding_format)(
+ const struct dc_link_settings *link_settings);
+ bool (*dp_should_enable_fec)(const struct dc_link *link);
+ bool (*dp_decide_link_settings)(
+ struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting);
+ enum dp_link_encoding (*mst_decide_link_encoding_format)(
+ const struct dc_link *link);
+ bool (*edp_decide_link_settings)(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw);
+ uint32_t (*bw_kbps_from_raw_frl_link_rate_data)(uint8_t bw);
+ bool (*dp_overwrite_extended_receiver_cap)(struct dc_link *link);
+ enum lttpr_mode (*dp_decide_lttpr_mode)(struct dc_link *link,
+ struct dc_link_settings *link_setting);
+
+
+ /*************************** DP DPIA/PHY ******************************/
+ int (*dpia_handle_usb4_bandwidth_allocation_for_link)(
+ struct dc_link *link, int peak_bw);
+ void (*dpia_handle_bw_alloc_response)(
+ struct dc_link *link, uint8_t bw, uint8_t result);
+ void (*dp_set_drive_settings)(
struct dc_link *link,
- const struct dc_crtc_timing *timing);
-bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
-bool link_detect_connection_type(struct dc_link *link,
- enum dc_connection_type *type);
-const struct dc_link_status *link_get_status(const struct dc_link *link);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-/* return true if the connected receiver supports the hdcp version */
-bool link_is_hdcp14(struct dc_link *link, enum signal_type signal);
-bool link_is_hdcp22(struct dc_link *link, enum signal_type signal);
-#endif
-void link_clear_dprx_states(struct dc_link *link);
-bool link_reset_cur_dp_mst_topology(struct dc_link *link);
-uint32_t dp_link_bandwidth_kbps(
- const struct dc_link *link,
- const struct dc_link_settings *link_settings);
-uint32_t link_timing_bandwidth_kbps(const struct dc_crtc_timing *timing);
-void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
-void link_restore_res_map(const struct dc *dc, uint32_t *map);
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+ void (*dpcd_write_rx_power_ctrl)(struct dc_link *link, bool on);
+
+ /*************************** DP IRQ Handler ***************************/
+ bool (*dp_parse_link_loss_status)(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+ bool (*dp_should_allow_hpd_rx_irq)(const struct dc_link *link);
+ void (*dp_handle_link_loss)(struct dc_link *link);
+ enum dc_status (*dp_read_hpd_rx_irq_data)(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data);
+ bool (*dp_handle_hpd_rx_irq)(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data,
+ bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
+
+
+ /*************************** eDP Panel Control ************************/
+ void (*edp_panel_backlight_power_on)(
+ struct dc_link *link, bool wait_for_hpd);
+ int (*edp_get_backlight_level)(const struct dc_link *link);
+ bool (*edp_get_backlight_level_nits)(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak);
+ bool (*edp_set_backlight_level)(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+ bool (*edp_set_backlight_level_nits)(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+ int (*edp_get_target_backlight_pwm)(const struct dc_link *link);
+ bool (*edp_get_psr_state)(
+ const struct dc_link *link, enum dc_psr_state *state);
+ bool (*edp_set_psr_allow_active)(
+ struct dc_link *link,
+ const bool *allow_active,
+ bool wait,
+ bool force_static,
+ const unsigned int *power_opts);
+ bool (*edp_setup_psr)(struct dc_link *link,
+ const struct dc_stream_state *stream,
+ struct psr_config *psr_config,
+ struct psr_context *psr_context);
+ bool (*edp_set_sink_vtotal_in_psr_active)(
+ const struct dc_link *link,
+ uint16_t psr_vtotal_idle,
+ uint16_t psr_vtotal_su);
+ void (*edp_get_psr_residency)(
+ const struct dc_link *link, uint32_t *residency);
+ bool (*edp_wait_for_t12)(struct dc_link *link);
+ bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing);
+ bool (*edp_backlight_enable_aux)(struct dc_link *link, bool enable);
+ void (*edp_add_delay_for_T9)(struct dc_link *link);
+ bool (*edp_receiver_ready_T9)(struct dc_link *link);
+ bool (*edp_receiver_ready_T7)(struct dc_link *link);
+ bool (*edp_power_alpm_dpcd_enable)(struct dc_link *link, bool enable);
+
+
+ /*************************** DP CTS ************************************/
+ void (*dp_handle_automated_test)(struct dc_link *link);
+ bool (*dp_set_test_pattern)(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+ void (*dp_set_preferred_link_settings)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+ void (*dp_set_preferred_training_settings)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain);
+
+
+ /*************************** DP Trace *********************************/
+ bool (*dp_trace_is_initialized)(struct dc_link *link);
+ void (*dp_trace_set_is_logged_flag)(struct dc_link *link,
+ bool in_detection,
+ bool is_logged);
+ bool (*dp_trace_is_logged)(struct dc_link *link, bool in_detection);
+ unsigned long long (*dp_trace_get_lt_end_timestamp)(
+ struct dc_link *link, bool in_detection);
+ const struct dp_trace_lt_counts *(*dp_trace_get_lt_counts)(
+ struct dc_link *link, bool in_detection);
+ unsigned int (*dp_trace_get_link_loss_count)(struct dc_link *link);
+ void (*dp_trace_set_edp_power_timestamp)(struct dc_link *link,
+ bool power_up);
+ uint64_t (*dp_trace_get_edp_poweron_timestamp)(struct dc_link *link);
+ uint64_t (*dp_trace_get_edp_poweroff_timestamp)(struct dc_link *link);
+ void (*dp_trace_source_sequence)(
+ struct dc_link *link, uint8_t dp_test_mode);
+};
#endif /* __DC_LINK_HPD_H__ */
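To make the new calling convention concrete: once link_factory.c has populated the table returned by link_create_link_service(), every dc-side call site dispatches through the vtable, as the converted sites elsewhere in this patch do (e.g. dc->link_srv->dp_is_128b_132b_signal() in dcn32_fpu.c). A minimal hypothetical caller (example_handle_hpd is not part of the patch):

	static void example_handle_hpd(struct dc *dc, struct dc_link *link,
			struct dc_state *state, struct pipe_ctx *pipe_ctx)
	{
		struct link_service *link_srv = dc->link_srv;

		if (link_srv->detect_link(link, DETECT_REASON_HPD))
			link_srv->set_dpms_on(state, pipe_ctx);
		else
			link_srv->set_dpms_off(pipe_ctx);
	}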
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index fa6da93caa88..eaeb684c8a48 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -201,7 +201,7 @@ bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
const struct resource_context *res_ctx,
const struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile
index 40352d8d7648..a52b56e2859e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/link/Makefile
@@ -55,7 +55,7 @@ LINK_PROTOCOLS = link_hpd.o link_ddc.o link_dpcd.o link_dp_dpia.o \
link_dp_training.o link_dp_training_8b_10b.o link_dp_training_128b_132b.o \
link_dp_training_dpia.o link_dp_training_auxless.o \
link_dp_training_fixed_vs_pe_retimer.o link_dp_phy.o link_dp_capability.o \
-link_edp_panel_control.o link_dp_irq_handler.o
+link_edp_panel_control.o link_dp_irq_handler.o link_dp_dpia_bw.o
AMD_DAL_LINK_PROTOCOLS = $(addprefix $(AMDDALPATH)/dc/link/protocols/, \
$(LINK_PROTOCOLS))
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 942300e0bd92..db9f1baa27e5 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -28,6 +28,7 @@
#include "link/protocols/link_dp_training.h"
#include "link/protocols/link_dp_phy.h"
#include "link/protocols/link_dp_training_fixed_vs_pe_retimer.h"
+#include "link/protocols/link_dp_capability.h"
#include "link/link_dpms.h"
#include "resource.h"
#include "dm_helpers.h"
@@ -75,7 +76,7 @@ static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
return false;
}
-void dp_retrain_link_dp_test(struct dc_link *link,
+static void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
bool skip_video_pattern)
{
@@ -250,7 +251,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
/* prepare link training settings */
link_training_settings.link_settings = link->cur_link_settings;
- link_training_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link->cur_link_settings);
+ link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
@@ -408,7 +409,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
* all the time. Do not touch it.
* forward request to DS
*/
- dc_link_dp_set_test_pattern(
+ dp_set_test_pattern(
link,
test_pattern,
DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
@@ -585,7 +586,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
}
}
-void dc_link_dp_handle_automated_test(struct dc_link *link)
+void dp_handle_automated_test(struct dc_link *link)
{
union test_request test_request;
union test_response test_response;
@@ -651,7 +652,7 @@ void dc_link_dp_handle_automated_test(struct dc_link *link)
sizeof(test_response));
}
-bool dc_link_dp_set_test_pattern(
+bool dp_set_test_pattern(
struct dc_link *link,
enum dp_test_pattern test_pattern,
enum dp_test_pattern_color_space test_pattern_color_space,
@@ -941,28 +942,9 @@ bool dc_link_dp_set_test_pattern(
return true;
}
-void dc_link_set_drive_settings(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link)
-{
-
- int i;
- struct link_resource link_res;
-
- for (i = 0; i < dc->link_count; i++)
- if (dc->links[i] == link)
- break;
-
- if (i >= dc->link_count)
- ASSERT_CRITICAL(false);
-
- link_get_cur_link_res(link, &link_res);
- dp_set_drive_settings(dc->links[i], &link_res, lt_settings);
-}
-
-void dc_link_set_preferred_link_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link)
+void dp_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
{
int i;
struct pipe_ctx *pipe;
@@ -1001,11 +983,11 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
dp_retrain_link_dp_test(link, &store_settings, false);
}
-void dc_link_set_preferred_training_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link_training_overrides *lt_overrides,
- struct dc_link *link,
- bool skip_immediate_retrain)
+void dp_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain)
{
if (lt_overrides != NULL)
link->preferred_training_settings = *lt_overrides;
@@ -1025,22 +1007,5 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
/* Retrain now, or wait until next stream update to apply */
if (skip_immediate_retrain == false)
- dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
-}
-
-void dc_link_set_test_pattern(struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size)
-{
- if (link != NULL)
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- test_pattern_color_space,
- p_link_settings,
- p_custom_pattern,
- cust_pattern_size);
+ dp_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
index 7f17838b653b..eae23ea7f6ec 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
@@ -25,9 +25,20 @@
#ifndef __LINK_DP_CTS_H__
#define __LINK_DP_CTS_H__
#include "link.h"
-
-void dp_retrain_link_dp_test(struct dc_link *link,
+void dp_handle_automated_test(struct dc_link *link);
+bool dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+void dp_set_preferred_link_settings(struct dc *dc,
struct dc_link_settings *link_setting,
- bool skip_video_pattern);
-
+ struct dc_link *link);
+void dp_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain);
#endif /* __LINK_DP_CTS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
index 459b362ed374..fbcd8fb58ea8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
@@ -37,7 +37,7 @@ void dp_trace_reset(struct dc_link *link)
memset(&link->dp_trace, 0, sizeof(link->dp_trace));
}
-bool dc_dp_trace_is_initialized(struct dc_link *link)
+bool dp_trace_is_initialized(struct dc_link *link)
{
return link->dp_trace.is_initialized;
}
@@ -76,7 +76,7 @@ void dp_trace_lt_total_count_increment(struct dc_link *link,
link->dp_trace.commit_lt_trace.counts.total++;
}
-void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+void dp_trace_set_is_logged_flag(struct dc_link *link,
bool in_detection,
bool is_logged)
{
@@ -86,8 +86,7 @@ void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
link->dp_trace.commit_lt_trace.is_logged = is_logged;
}
-bool dc_dp_trace_is_logged(struct dc_link *link,
- bool in_detection)
+bool dp_trace_is_logged(struct dc_link *link, bool in_detection)
{
if (in_detection)
return link->dp_trace.detect_lt_trace.is_logged;
@@ -123,7 +122,7 @@ void dp_trace_set_lt_end_timestamp(struct dc_link *link,
link->dp_trace.commit_lt_trace.timestamps.end = dm_get_timestamp(link->dc->ctx);
}
-unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+unsigned long long dp_trace_get_lt_end_timestamp(struct dc_link *link,
bool in_detection)
{
if (in_detection)
@@ -132,7 +131,7 @@ unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
return link->dp_trace.commit_lt_trace.timestamps.end;
}
-struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+const struct dp_trace_lt_counts *dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection)
{
if (in_detection)
@@ -141,12 +140,12 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
return &link->dp_trace.commit_lt_trace.counts;
}
-unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link)
+unsigned int dp_trace_get_link_loss_count(struct dc_link *link)
{
return link->dp_trace.link_loss_count;
}
-void link_dp_trace_set_edp_power_timestamp(struct dc_link *link,
+void dp_trace_set_edp_power_timestamp(struct dc_link *link,
bool power_up)
{
if (!power_up)
@@ -156,17 +155,17 @@ void link_dp_trace_set_edp_power_timestamp(struct dc_link *link,
link->dp_trace.edp_trace_power_timestamps.poweron = dm_get_timestamp(link->dc->ctx);
}
-uint64_t link_dp_trace_get_edp_poweron_timestamp(struct dc_link *link)
+uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link)
{
return link->dp_trace.edp_trace_power_timestamps.poweron;
}
-uint64_t link_dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)
+uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)
{
return link->dp_trace.edp_trace_power_timestamps.poweroff;
}
-void link_dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
+void dp_trace_source_sequence(struct dc_link *link, uint8_t dp_test_mode)
{
if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
index 89feea1b2692..ab437a0c9101 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
@@ -28,7 +28,7 @@
void dp_trace_init(struct dc_link *link);
void dp_trace_reset(struct dc_link *link);
-bool dc_dp_trace_is_initialized(struct dc_link *link);
+bool dp_trace_is_initialized(struct dc_link *link);
void dp_trace_detect_lt_init(struct dc_link *link);
void dp_trace_commit_lt_init(struct dc_link *link);
void dp_trace_link_loss_increment(struct dc_link *link);
@@ -37,10 +37,10 @@ void dp_trace_lt_fail_count_update(struct dc_link *link,
bool in_detection);
void dp_trace_lt_total_count_increment(struct dc_link *link,
bool in_detection);
-void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
+void dp_trace_set_is_logged_flag(struct dc_link *link,
bool in_detection,
bool is_logged);
-bool dc_dp_trace_is_logged(struct dc_link *link,
+bool dp_trace_is_logged(struct dc_link *link,
bool in_detection);
void dp_trace_lt_result_update(struct dc_link *link,
enum link_training_result result,
@@ -49,10 +49,15 @@ void dp_trace_set_lt_start_timestamp(struct dc_link *link,
bool in_detection);
void dp_trace_set_lt_end_timestamp(struct dc_link *link,
bool in_detection);
-unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
+unsigned long long dp_trace_get_lt_end_timestamp(struct dc_link *link,
bool in_detection);
-struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
+const struct dp_trace_lt_counts *dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection);
-unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
+unsigned int dp_trace_get_link_loss_count(struct dc_link *link);
+void dp_trace_set_edp_power_timestamp(struct dc_link *link,
+ bool power_up);
+uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link);
+uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link);
+void dp_trace_source_sequence(struct dc_link *link, uint8_t dp_test_mode);
#endif /* __LINK_DP_TRACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index b092b00b3599..bebf9c4c8702 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -44,7 +44,7 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
link_enc->funcs->connect_dig_be_to_fe(link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
if (stream_enc->funcs->enable_fifo)
stream_enc->funcs->enable_fifo(stream_enc);
@@ -63,7 +63,8 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->id,
false);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
}
@@ -105,7 +106,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
&stream->timing);
if (dc_is_dp_signal(stream->signal))
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}
void enable_dio_dp_link_output(struct dc_link *link,
@@ -126,7 +128,8 @@ void enable_dio_dp_link_output(struct dc_link *link,
link_enc,
link_settings,
clock_source);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
void disable_dio_link_output(struct dc_link *link,
@@ -136,7 +139,8 @@ void disable_dio_link_output(struct dc_link *link,
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
link_enc->funcs->disable_output(link_enc, signal);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
void set_dio_dp_link_test_pattern(struct dc_link *link,
@@ -146,7 +150,7 @@ void set_dio_dp_link_test_pattern(struct dc_link *link,
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
void set_dio_dp_lane_settings(struct dc_link *link,
@@ -195,7 +199,8 @@ void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc, false);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM);
}
@@ -214,7 +219,8 @@ void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx)
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(pipe_ctx->stream->link,
+ pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM);
}
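The pointer chain pipe_ctx->stream->ctx->dc->link_srv repeats at every hwss call site above. A hypothetical inline wrapper (not part of this patch) sketches how that dispatch could be factored out, assuming pipe_ctx and its stream are valid:

	static inline void trace_source_sequence(struct pipe_ctx *pipe_ctx, uint8_t seq)
	{
		struct dc_link *link = pipe_ctx->stream->link;

		if (dc_is_dp_signal(pipe_ctx->stream->signal))
			link->dc->link_srv->dp_trace_source_sequence(link, seq);
	}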
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
index aa1c5e253b43..edd7d026a762 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
@@ -68,7 +68,8 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct fixed31_32 h_blank_in_ms, time_slot_in_ms, mtp_cnt_per_h_blank;
uint32_t link_bw_in_kbps =
- dc_link_bandwidth_kbps(pipe_ctx->stream->link, link_settings);
+ hpo_dp_stream_encoder->ctx->dc->link_srv->dp_link_bandwidth_kbps(
+ pipe_ctx->stream->link, link_settings);
uint16_t hblank_min_symbol_width = 0;
if (link_bw_in_kbps > 0) {
@@ -115,7 +116,8 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
stream->use_vsc_sdp_for_colorimetry,
stream->timing.flags.DSC,
false);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}
static void enable_hpo_dp_fpga_link_output(struct dc_link *link,
@@ -201,7 +203,7 @@ static void set_hpo_dp_link_test_pattern(struct dc_link *link,
{
link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
link_res->hpo_dp_link_enc, tp_params);
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
static void set_hpo_dp_lane_settings(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 38216c789d77..fee71ebdfc73 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -466,7 +466,6 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
link->local_sink = prev_sink;
}
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
{
struct hdcp_protection_message msg22;
@@ -508,7 +507,6 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
}
}
-#endif // CONFIG_DRM_AMD_DC_HDCP
static void read_current_link_settings_on_detect(struct dc_link *link)
{
union lane_count_set lane_count_set = {0};
@@ -855,6 +853,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
struct dc_sink *prev_sink = NULL;
struct dpcd_caps prev_dpcd_caps;
enum dc_connection_type new_connection_type = dc_connection_none;
+ enum dc_connection_type pre_connection_type = link->type;
const uint32_t post_oui_delay = 30; // 30ms
DC_LOGGER_INIT(link->ctx->logger);
@@ -878,7 +877,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
return true;
}
- if (!dc_link_detect_connection_type(link, &new_connection_type)) {
+ if (!link_detect_connection_type(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -957,6 +956,8 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
if (!detect_dp(link, &sink_caps, reason)) {
+ link->type = pre_connection_type;
+
if (prev_sink)
dc_sink_release(prev_sink);
return false;
@@ -1084,9 +1085,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
* TODO debug why certain monitors don't like
* two link trainings
*/
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
query_hdcp_capability(sink->sink_signal, link);
-#endif
} else {
// If edid is the same, then discard new sink and revert back to original sink
if (same_edid) {
@@ -1094,9 +1093,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
sink = prev_sink;
prev_sink = NULL;
}
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
query_hdcp_capability(sink->sink_signal, link);
-#endif
}
/* HDMI-DVI Dongle */
@@ -1162,9 +1159,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
/* From Connected-to-Disconnected. */
link->type = dc_connection_none;
sink_caps.signal = SIGNAL_TYPE_NONE;
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
memset(&link->hdcp_caps, 0, sizeof(struct hdcp_caps));
-#endif
/* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
* is not cleared. If we emulate a DP signal on this connection, it thinks
* the dongle is still there and limits the number of modes we can emulate.
@@ -1189,7 +1184,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
/**
- * dc_link_detect_connection_type() - Determine if there is a sink connected
+ * link_detect_connection_type() - Determine if there is a sink connected
*
* @type: Returned connection type
* Does not detect downstream devices, such as MST sinks
@@ -1213,7 +1208,7 @@ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *
/* Link may not have physical HPD pin. */
if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
+ if (link->is_hpd_pending || !dpia_query_hpd_status(link))
*type = dc_connection_none;
else
*type = dc_connection_single;
@@ -1244,11 +1239,16 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
bool is_delegated_to_mst_top_mgr = false;
enum dc_connection_type pre_link_type = link->type;
+ DC_LOGGER_INIT(link->ctx->logger);
+
is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
if (is_local_sink_detect_success && link->local_sink)
verify_link_capability(link, link->local_sink, reason);
+ DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
+ link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
+
if (is_local_sink_detect_success && link->local_sink &&
dc_is_dp_signal(link->local_sink->sink_signal) &&
link->dpcd_caps.is_mst_capable)
@@ -1266,7 +1266,6 @@ void link_clear_dprx_states(struct dc_link *link)
{
memset(&link->dprx_states, 0, sizeof(link->dprx_states));
}
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
bool link_is_hdcp14(struct dc_link *link, enum signal_type signal)
{
@@ -1314,10 +1313,108 @@ bool link_is_hdcp22(struct dc_link *link, enum signal_type signal)
return ret;
}
-#endif // CONFIG_DRM_AMD_DC_HDCP
const struct dc_link_status *link_get_status(const struct dc_link *link)
{
return &link->link_status;
}
+
+static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
+{
+ if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ dc_sink_retain(sink);
+
+ dc_link->remote_sinks[dc_link->sink_count] = sink;
+ dc_link->sink_count++;
+
+ return true;
+}
+
+struct dc_sink *link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data)
+{
+ struct dc_sink *dc_sink;
+ enum dc_edid_status edid_status;
+
+ if (len > DC_MAX_EDID_BUFFER_SIZE) {
+ dm_error("Max EDID buffer size breached!\n");
+ return NULL;
+ }
+
+ if (!init_data) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if (!init_data->link) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dc_sink = dc_sink_create(init_data);
+
+ if (!dc_sink)
+ return NULL;
+
+ memmove(dc_sink->dc_edid.raw_edid, edid, len);
+ dc_sink->dc_edid.length = len;
+
+	if (!link_add_remote_sink_helper(link, dc_sink))
+ goto fail_add_sink;
+
+ edid_status = dm_helpers_parse_edid_caps(
+ link,
+ &dc_sink->dc_edid,
+ &dc_sink->edid_caps);
+
+ /*
+	 * Treat the device as if it has no EDID when EDID
+	 * parsing fails
+ */
+ if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
+ dc_sink->dc_edid.length = 0;
+ dm_error("Bad EDID, status%d!\n", edid_status);
+ }
+
+ return dc_sink;
+
+fail_add_sink:
+ dc_sink_release(dc_sink);
+ return NULL;
+}
+
+void link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+{
+ int i;
+
+ if (!link->sink_count) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == sink) {
+ dc_sink_release(sink);
+ link->remote_sinks[i] = NULL;
+
+ /* shrink array to remove empty place */
+ while (i < link->sink_count - 1) {
+ link->remote_sinks[i] = link->remote_sinks[i+1];
+ i++;
+ }
+ link->remote_sinks[i] = NULL;
+ link->sink_count--;
+ return;
+ }
+ }
+}
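A hedged usage sketch for the remote sink helpers moved above, reached through the service table that this patch builds in link_factory.c (edid_buf, edid_len and the init data are placeholders):

	struct dc_sink_init_data init_data = {
		.link = link,
		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
	};
	struct dc_sink *sink;

	sink = dc->link_srv->add_remote_sink(link, edid_buf, edid_len, &init_data);
	if (sink) {
		/* ... consume sink->edid_caps ... */
		dc->link_srv->remove_remote_sink(link, sink);
	}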
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
index 1831636516fb..7da05078721e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
@@ -26,5 +26,18 @@
#ifndef __DC_LINK_DETECTION_H__
#define __DC_LINK_DETECTION_H__
#include "link.h"
-
+bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
+bool link_detect_connection_type(struct dc_link *link,
+ enum dc_connection_type *type);
+struct dc_sink *link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+void link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink);
+bool link_reset_cur_dp_mst_topology(struct dc_link *link);
+const struct dc_link_status *link_get_status(const struct dc_link *link);
+bool link_is_hdcp14(struct dc_link *link, enum signal_type signal);
+bool link_is_hdcp22(struct dc_link *link, enum signal_type signal);
+void link_clear_dprx_states(struct dc_link *link);
#endif /* __DC_LINK_DETECTION_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 257e1c3ba00a..020d668ce09e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -37,6 +37,7 @@
#include "link_dpms.h"
#include "link_hwss.h"
+#include "link_validation.h"
#include "accessories/link_fpga.h"
#include "accessories/link_dp_trace.h"
#include "protocols/link_dpcd.h"
@@ -46,6 +47,7 @@
#include "protocols/link_dp_capability.h"
#include "protocols/link_dp_training.h"
#include "protocols/link_edp_panel_control.h"
+#include "protocols/link_dp_dpia_bw.h"
#include "dm_helpers.h"
#include "link_enc_cfg.h"
@@ -136,7 +138,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
}
if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
- dc_link_dp_receiver_power_ctrl(link, false);
+ dpcd_write_rx_power_ctrl(link, false);
}
}
@@ -646,7 +648,6 @@ static void write_i2c_redriver_setting(
if (!i2c_success)
DC_LOG_DEBUG("Set redriver failed");
}
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
@@ -672,7 +673,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
/* stream encoder index */
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
config.stream_enc_idx =
pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
@@ -681,7 +682,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
/* link encoder index */
config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
/* dio output index is dpia index for DPIA endpoint & dcio index by default */
@@ -702,7 +703,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
config.mst_enabled = (pipe_ctx->stream->signal ==
SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
- config.dp2_enabled = link_is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
+ config.dp2_enabled = dp_is_128b_132b_signal(pipe_ctx) ? 1 : 0;
config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
1 : 0;
config.dpms_off = dpms_off;
@@ -712,7 +713,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
-#endif
static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
{
@@ -817,7 +817,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
/* Enable DSC in encoder */
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
- && !link_is_dp_128b_132b_signal(pipe_ctx)) {
+ && !dp_is_128b_132b_signal(pipe_ctx)) {
DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
@@ -843,7 +843,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
/* disable DSC in stream encoder */
if (dc_is_dp_signal(stream->signal)) {
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.hpo_dp_stream_enc,
false,
@@ -902,7 +902,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
if (dc_is_dp_signal(stream->signal)) {
DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.hpo_dp_stream_enc,
true,
@@ -919,7 +919,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
/* disable DSC PPS in stream encoder */
memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps));
if (dc_is_dp_signal(stream->signal)) {
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.hpo_dp_stream_enc,
false,
@@ -1001,7 +1001,7 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
}
}
-static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
+static void log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
{
const uint32_t VCP_Y_PRECISION = 1000;
uint64_t vcp_x, vcp_y;
@@ -1044,7 +1044,7 @@ static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_tim
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
struct fixed31_32 mbytes_per_sec;
- uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link,
+ uint32_t link_rate_in_mbytes_per_sec = dp_link_bandwidth_kbps(stream->link,
&stream->link->cur_link_settings);
link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
@@ -1153,7 +1153,7 @@ static bool poll_for_allocation_change_trigger(struct dc_link *link)
break;
}
- msleep(5);
+ fsleep(5000);
}
if (result == ACT_FAILED) {
@@ -1517,7 +1517,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
pbn = get_pbn_from_timing(pipe_ctx);
avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+ log_vcp_x_y(link, avg_time_slots_per_mtp);
if (link_hwss->ext.set_throttled_vcp_size)
link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
@@ -1535,7 +1535,7 @@ struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
{
struct fixed31_32 link_bw_effective =
dc_fixpt_from_int(
- dc_link_bandwidth_kbps(link, &link->cur_link_settings));
+ dp_link_bandwidth_kbps(link, &link->cur_link_settings));
struct fixed31_32 timeslot_bw_effective =
dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
struct fixed31_32 timing_bw =
@@ -1640,7 +1640,7 @@ static bool write_128b_132b_sst_payload_allocation_table(
}
}
retries++;
- msleep(5);
+ fsleep(5000);
}
if (!result && retries == max_retries) {
@@ -1670,7 +1670,7 @@ static enum dc_status update_sst_payload(struct pipe_ctx *pipe_ctx,
if (!allocate) {
avg_time_slots_per_mtp = dc_fixpt_from_int(0);
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+ log_vcp_x_y(link, avg_time_slots_per_mtp);
if (link_hwss->ext.set_throttled_vcp_size)
link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
@@ -1721,7 +1721,7 @@ static enum dc_status update_sst_payload(struct pipe_ctx *pipe_ctx,
DP_128b_132b_ENCODING) {
avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link);
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+ log_vcp_x_y(link, avg_time_slots_per_mtp);
if (link_hwss->ext.set_throttled_vcp_size)
link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
@@ -2044,11 +2044,17 @@ static enum dc_status enable_link_dp(struct dc_state *state,
}
}
- /* Train with fallback when enabling DPIA link. Conventional links are
+ /*
+ * If the link is DP-over-USB4 do the following:
+ * - Train with fallback when enabling DPIA link. Conventional links are
* trained with fallback during sink detection.
+	 * - Allocate only the bandwidth the stream needs (in Gbps) and inform
+	 *   the CM in case the stream needs more or less bandwidth than what
+	 *   was allocated earlier at plug time.
*/
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
do_fallback = true;
+ }
/*
* Temporary w/a to get DP2.0 link rates to work with SST.
@@ -2117,7 +2123,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
set_default_brightness_aux(link); // TODO: use cached if known
if (link->dpcd_sink_ext_caps.bits.oled == 1)
msleep(bl_oled_enable_delay);
- link_backlight_enable_aux(link, true);
+ edp_backlight_enable_aux(link, true);
}
return status;
@@ -2237,7 +2243,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
ASSERT(is_master_pipe_for_link(link, pipe_ctx));
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -2262,15 +2268,13 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
dc->hwss.disable_audio_stream(pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, true);
-#endif
dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link_is_dp_128b_132b_signal(pipe_ctx))
+ dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, false);
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
@@ -2299,7 +2303,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
}
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- !link_is_dp_128b_132b_signal(pipe_ctx)) {
+ !dp_is_128b_132b_signal(pipe_ctx)) {
/* In DP1.x SST mode, our encoder will go to TPS1
* when link is on but stream is off.
@@ -2319,7 +2323,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (dc_is_dp_signal(pipe_ctx->stream->signal))
link_set_dsc_enable(pipe_ctx, false);
}
- if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (dp_is_128b_132b_signal(pipe_ctx)) {
if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
}
@@ -2343,7 +2347,7 @@ void link_set_dpms_on(
ASSERT(is_master_pipe_for_link(link, pipe_ctx));
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -2365,7 +2369,7 @@ void link_set_dpms_on(
ASSERT(link_enc);
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
- && !link_is_dp_128b_132b_signal(pipe_ctx)) {
+ && !dp_is_128b_132b_signal(pipe_ctx)) {
if (link_enc)
link_enc->funcs->setup(
link_enc,
@@ -2375,7 +2379,7 @@ void link_set_dpms_on(
pipe_ctx->stream->link->link_state_valid = true;
if (pipe_ctx->stream_res.tg->funcs->set_out_mux) {
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
otg_out_dest = OUT_MUX_HPO_DP;
else
otg_out_dest = OUT_MUX_DIO;
@@ -2398,7 +2402,7 @@ void link_set_dpms_on(
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
@@ -2410,9 +2414,7 @@ void link_set_dpms_on(
dc->hwss.enable_audio_stream(pipe_ctx);
}
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
-#endif
return;
}
@@ -2422,9 +2424,7 @@ void link_set_dpms_on(
!pipe_ctx->stream->timing.flags.DSC &&
!pipe_ctx->next_odm_pipe) {
pipe_ctx->stream->dpms_off = false;
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
-#endif
return;
}
@@ -2477,7 +2477,7 @@ void link_set_dpms_on(
* from transmitter control.
*/
if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
- link_is_dp_128b_132b_signal(pipe_ctx)))
+ dp_is_128b_132b_signal(pipe_ctx)))
if (link_enc)
link_enc->funcs->setup(
link_enc,
@@ -2497,7 +2497,7 @@ void link_set_dpms_on(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
allocate_mst_payload(pipe_ctx);
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link_is_dp_128b_132b_signal(pipe_ctx))
+ dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, true);
dc->hwss.unblank_stream(pipe_ctx,
@@ -2508,14 +2508,12 @@ void link_set_dpms_on(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
-#endif
dc->hwss.enable_audio_stream(pipe_ctx);
} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- if (link_is_dp_128b_132b_signal(pipe_ctx))
+ if (dp_is_128b_132b_signal(pipe_ctx))
dp_fpga_hpo_enable_link_and_stream(state, pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
index 33d312dabdb8..9398f9c1666a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
@@ -27,14 +27,27 @@
#define __DC_LINK_DPMS_H__
#include "link.h"
-bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx,
- bool enable, bool immediate_update);
-struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
- const struct dc_stream_state *stream,
- const struct dc_link *link);
+void link_set_dpms_on(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
+void link_set_dpms_off(struct pipe_ctx *pipe_ctx);
+void link_resume(struct dc_link *link);
+void link_blank_all_dp_displays(struct dc *dc);
+void link_blank_all_edp_displays(struct dc *dc);
+void link_blank_dp_stream(struct dc_link *link, bool hw_init);
void link_set_all_streams_dpms_off_for_link(struct dc_link *link);
void link_get_master_pipes_with_dpms_on(const struct dc_link *link,
struct dc_state *state,
uint8_t *count,
struct pipe_ctx *pipes[MAX_PIPES]);
+enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx,
+ bool enable, bool immediate_update);
+struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link);
+void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
+bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
+bool link_update_dsc_config(struct pipe_ctx *pipe_ctx);
#endif /* __DC_LINK_DPMS_H__ */
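The dpms entry points declared above are meant to be reached through the service table rather than called directly. A minimal sketch of the expected call pattern (hypothetical call site; pipe_ctx assumed valid):

	/* blank all DP displays before a global reset, then power one stream down */
	dc->link_srv->blank_all_dp_displays(dc);
	dc->link_srv->set_dpms_off(pipe_ctx);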
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index aeb26a4d539e..3951d48118c4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -27,7 +27,20 @@
* This file owns the creation/destruction of link structure.
*/
#include "link_factory.h"
+#include "link_detection.h"
+#include "link_resource.h"
+#include "link_validation.h"
+#include "link_dpms.h"
+#include "accessories/link_dp_cts.h"
+#include "accessories/link_dp_trace.h"
+#include "accessories/link_fpga.h"
#include "protocols/link_ddc.h"
+#include "protocols/link_dp_capability.h"
+#include "protocols/link_dp_dpia_bw.h"
+#include "protocols/link_dp_dpia.h"
+#include "protocols/link_dp_irq_handler.h"
+#include "protocols/link_dp_phy.h"
+#include "protocols/link_dp_training.h"
#include "protocols/link_edp_panel_control.h"
#include "protocols/link_hpd.h"
#include "gpio_service_interface.h"
@@ -39,7 +52,248 @@
DC_LOG_HW_HOTPLUG( \
__VA_ARGS__)
-static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder)
+/* link factory owns the creation/destruction of link structures. */
+static void construct_link_service_factory(struct link_service *link_srv)
+{
+ link_srv->create_link = link_create;
+ link_srv->destroy_link = link_destroy;
+}
+
+/* link_detection manages link detection states and receiver states by using
+ * various link protocols. It also provides helper functions to interpret
+ * certain capabilities or statuses based on the states it manages, or to
+ * retrieve them directly from connected receivers.
+ */
+static void construct_link_service_detection(struct link_service *link_srv)
+{
+ link_srv->detect_link = link_detect;
+ link_srv->detect_connection_type = link_detect_connection_type;
+ link_srv->add_remote_sink = link_add_remote_sink;
+ link_srv->remove_remote_sink = link_remove_remote_sink;
+ link_srv->get_hpd_state = link_get_hpd_state;
+ link_srv->get_hpd_gpio = link_get_hpd_gpio;
+ link_srv->enable_hpd = link_enable_hpd;
+ link_srv->disable_hpd = link_disable_hpd;
+ link_srv->enable_hpd_filter = link_enable_hpd_filter;
+ link_srv->reset_cur_dp_mst_topology = link_reset_cur_dp_mst_topology;
+ link_srv->get_status = link_get_status;
+ link_srv->is_hdcp1x_supported = link_is_hdcp14;
+ link_srv->is_hdcp2x_supported = link_is_hdcp22;
+ link_srv->clear_dprx_states = link_clear_dprx_states;
+}
+
+/* link resource implements accessors to link resource. */
+static void construct_link_service_resource(struct link_service *link_srv)
+{
+ link_srv->get_cur_res_map = link_get_cur_res_map;
+ link_srv->restore_res_map = link_restore_res_map;
+ link_srv->get_cur_link_res = link_get_cur_link_res;
+}
+
+/* link validation owns timing validation against various link limitations
+ * (e.g. link bandwidth, receiver capability or our hardware capability). It
+ * also provides helper functions exposing the bandwidth formulas used in
+ * validation.
+ */
+static void construct_link_service_validation(struct link_service *link_srv)
+{
+ link_srv->validate_mode_timing = link_validate_mode_timing;
+ link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
+}
+
+/* link dpms owns the programming sequence of the stream's dpms state
+ * associated with the link, and the link's enable/disable sequences as a
+ * result of the stream's dpms state change.
+ */
+static void construct_link_service_dpms(struct link_service *link_srv)
+{
+ link_srv->set_dpms_on = link_set_dpms_on;
+ link_srv->set_dpms_off = link_set_dpms_off;
+ link_srv->resume = link_resume;
+ link_srv->blank_all_dp_displays = link_blank_all_dp_displays;
+ link_srv->blank_all_edp_displays = link_blank_all_edp_displays;
+ link_srv->blank_dp_stream = link_blank_dp_stream;
+ link_srv->increase_mst_payload = link_increase_mst_payload;
+ link_srv->reduce_mst_payload = link_reduce_mst_payload;
+ link_srv->set_dsc_on_stream = link_set_dsc_on_stream;
+ link_srv->set_dsc_enable = link_set_dsc_enable;
+ link_srv->update_dsc_config = link_update_dsc_config;
+}
+
+/* link ddc implements generic display communication protocols such as i2c,
+ * aux and scdc. It should not contain any specific applications of these
+ * protocols, such as display capability queries, detection, or handshakes
+ * such as link training.
+ */
+static void construct_link_service_ddc(struct link_service *link_srv)
+{
+ link_srv->create_ddc_service = link_create_ddc_service;
+ link_srv->destroy_ddc_service = link_destroy_ddc_service;
+ link_srv->query_ddc_data = link_query_ddc_data;
+ link_srv->aux_transfer_raw = link_aux_transfer_raw;
+ link_srv->aux_transfer_with_retries_no_mutex =
+ link_aux_transfer_with_retries_no_mutex;
+ link_srv->is_in_aux_transaction_mode = link_is_in_aux_transaction_mode;
+ link_srv->get_aux_defer_delay = link_get_aux_defer_delay;
+}
+
+/* link dp capability implements the dp specific link capability retrieval
+ * sequence. It is responsible for retrieving, parsing, overriding and
+ * deciding the capability obtained from the dp link. Link capability
+ * consists of encoders, DPRXs, cables, retimers, usb and all other possible
+ * backend capabilities.
+ */
+static void construct_link_service_dp_capability(struct link_service *link_srv)
+{
+ link_srv->dp_is_sink_present = dp_is_sink_present;
+ link_srv->dp_is_fec_supported = dp_is_fec_supported;
+ link_srv->dp_is_128b_132b_signal = dp_is_128b_132b_signal;
+ link_srv->dp_get_max_link_enc_cap = dp_get_max_link_enc_cap;
+ link_srv->dp_get_verified_link_cap = dp_get_verified_link_cap;
+ link_srv->dp_get_encoding_format = link_dp_get_encoding_format;
+ link_srv->dp_should_enable_fec = dp_should_enable_fec;
+ link_srv->dp_decide_link_settings = link_decide_link_settings;
+ link_srv->mst_decide_link_encoding_format =
+ mst_decide_link_encoding_format;
+ link_srv->edp_decide_link_settings = edp_decide_link_settings;
+ link_srv->bw_kbps_from_raw_frl_link_rate_data =
+ link_bw_kbps_from_raw_frl_link_rate_data;
+ link_srv->dp_overwrite_extended_receiver_cap =
+ dp_overwrite_extended_receiver_cap;
+ link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode;
+}
+
+/* link dp phy/dpia implements basic dp phy/dpia functionality such as
+ * enabling/disabling the output and setting lane/drive settings. It is
+ * responsible for maintaining and updating the software state representing
+ * the current phy/dpia status, such as the current link settings.
+ */
+static void construct_link_service_dp_phy_or_dpia(struct link_service *link_srv)
+{
+ link_srv->dpia_handle_usb4_bandwidth_allocation_for_link =
+ dpia_handle_usb4_bandwidth_allocation_for_link;
+ link_srv->dpia_handle_bw_alloc_response = dpia_handle_bw_alloc_response;
+ link_srv->dp_set_drive_settings = dp_set_drive_settings;
+ link_srv->dpcd_write_rx_power_ctrl = dpcd_write_rx_power_ctrl;
+}
+
+/* link dp irq handler implements the DP HPD short pulse handling sequence
+ * according to the DP specification.
+ */
+static void construct_link_service_dp_irq_handler(struct link_service *link_srv)
+{
+ link_srv->dp_parse_link_loss_status = dp_parse_link_loss_status;
+ link_srv->dp_should_allow_hpd_rx_irq = dp_should_allow_hpd_rx_irq;
+ link_srv->dp_handle_link_loss = dp_handle_link_loss;
+ link_srv->dp_read_hpd_rx_irq_data = dp_read_hpd_rx_irq_data;
+ link_srv->dp_handle_hpd_rx_irq = dp_handle_hpd_rx_irq;
+}
+
+/* link edp panel control implements retrieval and configuration of eDP panel
+ * features such as PSR and ABM, and it also manages the spec-defined eDP
+ * panel power sequences.
+ */
+static void construct_link_service_edp_panel_control(struct link_service *link_srv)
+{
+ link_srv->edp_panel_backlight_power_on = edp_panel_backlight_power_on;
+ link_srv->edp_get_backlight_level = edp_get_backlight_level;
+ link_srv->edp_get_backlight_level_nits = edp_get_backlight_level_nits;
+ link_srv->edp_set_backlight_level = edp_set_backlight_level;
+ link_srv->edp_set_backlight_level_nits = edp_set_backlight_level_nits;
+ link_srv->edp_get_target_backlight_pwm = edp_get_target_backlight_pwm;
+ link_srv->edp_get_psr_state = edp_get_psr_state;
+ link_srv->edp_set_psr_allow_active = edp_set_psr_allow_active;
+ link_srv->edp_setup_psr = edp_setup_psr;
+ link_srv->edp_set_sink_vtotal_in_psr_active =
+ edp_set_sink_vtotal_in_psr_active;
+ link_srv->edp_get_psr_residency = edp_get_psr_residency;
+ link_srv->edp_wait_for_t12 = edp_wait_for_t12;
+ link_srv->edp_is_ilr_optimization_required =
+ edp_is_ilr_optimization_required;
+ link_srv->edp_backlight_enable_aux = edp_backlight_enable_aux;
+ link_srv->edp_add_delay_for_T9 = edp_add_delay_for_T9;
+ link_srv->edp_receiver_ready_T9 = edp_receiver_ready_T9;
+ link_srv->edp_receiver_ready_T7 = edp_receiver_ready_T7;
+ link_srv->edp_power_alpm_dpcd_enable = edp_power_alpm_dpcd_enable;
+}
+
+/* link dp cts implements dp compliance test automation protocols and manual
+ * testing interfaces for debugging and certification purposes.
+ */
+static void construct_link_service_dp_cts(struct link_service *link_srv)
+{
+ link_srv->dp_handle_automated_test = dp_handle_automated_test;
+ link_srv->dp_set_test_pattern = dp_set_test_pattern;
+ link_srv->dp_set_preferred_link_settings =
+ dp_set_preferred_link_settings;
+ link_srv->dp_set_preferred_training_settings =
+ dp_set_preferred_training_settings;
+}
+
+/* link dp trace implements tracing interfaces for tracking major dp
+ * sequences, including execution status and timestamps.
+ */
+static void construct_link_service_dp_trace(struct link_service *link_srv)
+{
+ link_srv->dp_trace_is_initialized = dp_trace_is_initialized;
+ link_srv->dp_trace_set_is_logged_flag = dp_trace_set_is_logged_flag;
+ link_srv->dp_trace_is_logged = dp_trace_is_logged;
+ link_srv->dp_trace_get_lt_end_timestamp = dp_trace_get_lt_end_timestamp;
+ link_srv->dp_trace_get_lt_counts = dp_trace_get_lt_counts;
+ link_srv->dp_trace_get_link_loss_count = dp_trace_get_link_loss_count;
+ link_srv->dp_trace_set_edp_power_timestamp =
+ dp_trace_set_edp_power_timestamp;
+ link_srv->dp_trace_get_edp_poweron_timestamp =
+ dp_trace_get_edp_poweron_timestamp;
+ link_srv->dp_trace_get_edp_poweroff_timestamp =
+ dp_trace_get_edp_poweroff_timestamp;
+ link_srv->dp_trace_source_sequence = dp_trace_source_sequence;
+}
+
+static void construct_link_service(struct link_service *link_srv)
+{
+	/* All link service functions should fall under one of the sub
+	 * categories below. If a new function doesn't fit an existing sub
+	 * category, then either you are adding a whole new aspect of
+	 * responsibility to the link service or the function doesn't belong
+	 * in the link service at all. In either case please contact the arch
+	 * owner to arrange a design review meeting.
+ */
+ construct_link_service_factory(link_srv);
+ construct_link_service_detection(link_srv);
+ construct_link_service_resource(link_srv);
+ construct_link_service_validation(link_srv);
+ construct_link_service_dpms(link_srv);
+ construct_link_service_ddc(link_srv);
+ construct_link_service_dp_capability(link_srv);
+ construct_link_service_dp_phy_or_dpia(link_srv);
+ construct_link_service_dp_irq_handler(link_srv);
+ construct_link_service_edp_panel_control(link_srv);
+ construct_link_service_dp_cts(link_srv);
+ construct_link_service_dp_trace(link_srv);
+}
+
+struct link_service *link_create_link_service(void)
+{
+	struct link_service *link_srv = kzalloc(sizeof(*link_srv), GFP_KERNEL);
+
+	if (link_srv == NULL)
+		return NULL;
+
+	construct_link_service(link_srv);
+
+	return link_srv;
+}
+
+void link_destroy_link_service(struct link_service **link_srv)
+{
+ kfree(*link_srv);
+ *link_srv = NULL;
+}
+
+static enum transmitter translate_encoder_to_transmitter(
+ struct graphics_object_id encoder)
{
switch (encoder.id) {
case ENCODER_ID_INTERNAL_UNIPHY:
@@ -181,7 +435,7 @@ static enum channel_id get_ddc_line(struct dc_link *link)
return channel;
}
-static bool dc_link_construct_phy(struct dc_link *link,
+static bool construct_phy(struct dc_link *link,
const struct link_init_data *init_params)
{
uint8_t i;
@@ -274,14 +528,18 @@ static bool dc_link_construct_phy(struct dc_link *link,
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
switch (link->dc->config.allow_edp_hotplug_detection) {
- case 1: // only the 1st eDP handles hotplug
+ case HPD_EN_FOR_ALL_EDP:
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ break;
+ case HPD_EN_FOR_PRIMARY_EDP_ONLY:
if (link->link_index == 0)
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
else
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
break;
- case 2: // only the 2nd eDP handles hotplug
+ case HPD_EN_FOR_SECONDARY_EDP_ONLY:
if (link->link_index == 1)
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
@@ -289,6 +547,7 @@ static bool dc_link_construct_phy(struct dc_link *link,
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
break;
default:
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
break;
}
}
@@ -473,7 +732,7 @@ create_fail:
return false;
}
-static bool dc_link_construct_dpia(struct dc_link *link,
+static bool construct_dpia(struct dc_link *link,
const struct link_init_data *init_params)
{
struct ddc_service_init_data ddc_service_init_data = { 0 };
@@ -543,9 +802,9 @@ static bool link_construct(struct dc_link *link,
{
/* Handle dpia case */
if (init_params->is_dpia_link == true)
- return dc_link_construct_dpia(link, init_params);
+ return construct_dpia(link, init_params);
else
- return dc_link_construct_phy(link, init_params);
+ return construct_phy(link, init_params);
}
struct dc_link *link_create(const struct link_init_data *init_params)
@@ -574,4 +833,3 @@ void link_destroy(struct dc_link **link)
kfree(*link);
*link = NULL;
}
-
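A minimal lifecycle sketch for the service object created above, assuming a dc-level owner holds the pointer (the dc->link_srv hookup lives elsewhere in this series):

	dc->link_srv = link_create_link_service();
	if (!dc->link_srv)
		return false; /* allocation failed */

	/* ... all link operations go through dc->link_srv ... */

	link_destroy_link_service(&dc->link_srv); /* frees and NULLs the pointer */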
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
index 5b846147c4a6..e96220d48d03 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
@@ -25,5 +25,7 @@
#ifndef __LINK_FACTORY_H__
#define __LINK_FACTORY_H__
#include "link.h"
+struct dc_link *link_create(const struct link_init_data *init_params);
+void link_destroy(struct dc_link **link);
#endif /* __LINK_FACTORY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
index 45554d30adf0..1907bda3cb6e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
@@ -25,7 +25,8 @@
#ifndef __LINK_RESOURCE_H__
#define __LINK_RESOURCE_H__
#include "link.h"
+void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
+void link_restore_res_map(const struct dc *dc, uint32_t *map);
void link_get_cur_link_res(const struct dc_link *link,
struct link_resource *link_res);
-
#endif /* __LINK_RESOURCE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index d4f6ee6ca948..9a5010f86003 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -29,6 +29,7 @@
* provides helper functions exposing bandwidth formulas used in validation.
*/
#include "link_validation.h"
+#include "protocols/link_dp_capability.h"
#include "resource.h"
#define DC_LOGGER_INIT(logger)
@@ -123,7 +124,7 @@ static bool dp_active_dongle_validate_timing(
if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
struct dc_crtc_timing outputTiming = *timing;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
outputTiming.flags.DSC = 0;
@@ -233,7 +234,7 @@ uint32_t dp_link_bandwidth_kbps(
*/
link_rate_per_lane_kbps = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
- if (dc_link_should_enable_fec(link)) {
+ if (dp_should_enable_fec(link)) {
total_data_bw_efficiency_x10000 /= 100;
total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
}
@@ -254,19 +255,16 @@ uint32_t dp_link_bandwidth_kbps(
return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000;
}
-uint32_t link_timing_bandwidth_kbps(
- const struct dc_crtc_timing *timing)
+uint32_t link_timing_bandwidth_kbps(const struct dc_crtc_timing *timing)
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (timing->flags.DSC)
return dc_dsc_stream_bandwidth_in_kbps(timing,
timing->dsc_cfg.bits_per_pixel,
timing->dsc_cfg.num_slices_h,
timing->dsc_cfg.is_dp);
-#endif /* CONFIG_DRM_AMD_DC_DCN */
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
@@ -329,7 +327,7 @@ static bool dp_validate_mode_timing(
timing->v_addressable == (uint32_t) 480)
return true;
- link_setting = dc_link_get_link_cap(link);
+ link_setting = dp_get_verified_link_cap(link);
/* TODO: DYNAMIC_VALIDATION needs to be implemented */
/*if (flags.DYNAMIC_VALIDATION == 1 &&
@@ -338,7 +336,7 @@ static bool dp_validate_mode_timing(
*/
req_bw = dc_bandwidth_in_kbps_from_timing(timing);
- max_bw = dc_link_bandwidth_kbps(link, link_setting);
+ max_bw = dp_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
/* remember the biggest mode here, during
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index ab6a44f50032..2191d3a4950c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -25,4 +25,11 @@
#ifndef __LINK_VALIDATION_H__
#define __LINK_VALIDATION_H__
#include "link.h"
+enum dc_status link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+uint32_t dp_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings);
#endif /* __LINK_VALIDATION_H__ */
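As a sanity check on dp_link_bandwidth_kbps(), a hedged standalone re-derivation of its 8b/10b branch, assuming the usual constant values (LINK_RATE_REF_FREQ_IN_KHZ = 27000, BITS_PER_DP_BYTE = 8, ~99.75% data efficiency); the FEC reduction is omitted:

	static uint32_t approx_8b10b_bw_kbps(uint32_t link_rate, uint32_t lane_count)
	{
		/* link_rate is in multiples of 0.27 Gb/s, so this is the
		 * per-lane data rate after 8b/10b symbol overhead
		 */
		uint32_t per_lane_kbps = link_rate * 27000 * 8;

		/* same order of operations as the driver to avoid overflow */
		return per_lane_kbps * lane_count / 10000 * 9975;
	}

For HBR2 (link_rate 20) at 4 lanes this yields 17,236,800 kb/s, about 17.2 Gb/s of payload bandwidth.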
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index 5269125bc2a4..0fa1228bc178 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -53,7 +53,7 @@ struct aux_payloads {
struct vector payloads;
};
-static bool dal_ddc_i2c_payloads_create(
+static bool i2c_payloads_create(
struct dc_context *ctx,
struct i2c_payloads *payloads,
uint32_t count)
@@ -65,16 +65,24 @@ static bool dal_ddc_i2c_payloads_create(
return false;
}
-static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+static struct i2c_payload *i2c_payloads_get(struct i2c_payloads *p)
{
return (struct i2c_payload *)p->payloads.container;
}
-static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+static uint32_t i2c_payloads_get_count(struct i2c_payloads *p)
{
return p->payloads.count;
}
+static void i2c_payloads_destroy(struct i2c_payloads *p)
+{
+ if (!p)
+ return;
+
+ dal_vector_destruct(&p->payloads);
+}
+
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
static void i2c_payloads_add(
@@ -364,10 +372,10 @@ bool link_query_ddc_data(
struct i2c_command command = {0};
struct i2c_payloads payloads;
- if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
+ if (!i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
return false;
- command.payloads = dal_ddc_i2c_payloads_get(&payloads);
+ command.payloads = i2c_payloads_get(&payloads);
command.number_of_payloads = 0;
command.engine = DDC_I2C_COMMAND_ENGINE;
command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
@@ -379,20 +387,20 @@ bool link_query_ddc_data(
&payloads, address, read_size, read_buf, false);
command.number_of_payloads =
- dal_ddc_i2c_payloads_get_count(&payloads);
+ i2c_payloads_get_count(&payloads);
success = dm_helpers_submit_i2c(
ddc->ctx,
ddc->link,
&command);
- dal_vector_destruct(&payloads.payloads);
+ i2c_payloads_destroy(&payloads);
}
return success;
}
-int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+int link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result)
{
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index aaa5064408ba..860ef15d7f1b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -37,13 +37,41 @@
#define EDID_SEGMENT_SIZE 256
+struct ddc_service *link_create_ddc_service(
+ struct ddc_service_init_data *ddc_init_data);
+
+void link_destroy_ddc_service(struct ddc_service **ddc);
+
void set_ddc_transaction_type(
struct ddc_service *ddc,
enum ddc_transaction_type type);
+uint32_t link_get_aux_defer_delay(struct ddc_service *ddc);
+
+bool link_is_in_aux_transaction_mode(struct ddc_service *ddc);
+
bool try_to_configure_aux_timeout(struct ddc_service *ddc,
uint32_t timeout);
+bool link_query_ddc_data(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size);
+
+/* Attempt to submit an aux payload, retrying on timeouts, defers, and busy
+ * states as outlined in the DP spec. Returns true if the request was
+ * successful.
+ *
+ * NOTE: The function requires an explicit mutex on the DM side in order to
+ * prevent a potential race condition. DC components should call the dpcd
+ * read/write functions in dm_helpers in order to access dpcd safely.
+ */
+bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
+ struct aux_payload *payload);
+
void write_scdc_data(
struct ddc_service *ddc_service,
uint32_t pix_clk,
@@ -57,5 +85,8 @@ void set_dongle_type(struct ddc_service *ddc,
struct ddc *get_ddc_pin(struct ddc_service *ddc_service);
+int link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result);
#endif /* __DAL_DDC_SERVICE_H__ */
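A hedged usage sketch for link_aux_transfer_raw() declared above (field names follow struct aux_payload as used elsewhere in dc; error handling trimmed):

	uint8_t dpcd_rev;
	enum aux_return_code_type op_result;
	struct aux_payload payload = {
		.i2c_over_aux = false,	/* native aux, not i2c-over-aux */
		.write = false,		/* read transaction */
		.address = DP_DPCD_REV,
		.length = sizeof(dpcd_rev),
		.data = &dpcd_rev,
	};

	if (link_aux_transfer_raw(ddc, &payload, &op_result) >= 0 &&
	    op_result == AUX_RET_SUCCESS)
		DC_LOG_DC("DPCD rev: 0x%x\n", dpcd_rev);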
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index d4370856f164..e9bcb35ae185 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -42,6 +42,8 @@
#include "link_edp_panel_control.h"
#include "link_dp_irq_handler.h"
#include "link/accessories/link_dp_trace.h"
+#include "link/link_detection.h"
+#include "link/link_validation.h"
#include "link_dp_training.h"
#include "atomfirmware.h"
#include "resource.h"
@@ -155,7 +157,7 @@ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
-uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
+uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
{
switch (bw) {
case 0b001:
@@ -278,7 +280,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
if (!link->dpcd_caps.dpcd_rev.raw) {
do {
- dc_link_dp_receiver_power_ctrl(link, true);
+ dpcd_write_rx_power_ctrl(link, true);
core_link_read_dpcd(link, DP_DPCD_REV,
dpcd_data, length);
link->dpcd_caps.dpcd_rev.raw = dpcd_data[
@@ -309,7 +311,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
link->wa_flags.dp_keep_receiver_powered = false;
}
-bool dc_link_is_fec_supported(const struct dc_link *link)
+bool dp_is_fec_supported(const struct dc_link *link)
{
/* TODO - use asic cap instead of link_enc->features
* we no longer know which link enc to use for this link before commit
@@ -325,7 +327,7 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
}
-bool dc_link_should_enable_fec(const struct dc_link *link)
+bool dp_should_enable_fec(const struct dc_link *link)
{
bool force_disable = false;
@@ -342,10 +344,10 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
|| !link->dc->caps.edp_dsc_support))
force_disable = true;
- return !force_disable && dc_link_is_fec_supported(link);
+ return !force_disable && dp_is_fec_supported(link);
}
-bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
+bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
{
/* If this assert is hit then we have a link encoder dynamic management issue */
ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
@@ -645,7 +647,7 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
initial_link_setting;
uint32_t link_bw;
- if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap))
return false;
/* search for the minimum link setting that:
@@ -654,7 +656,7 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
- link_bw = dc_link_bandwidth_kbps(
+ link_bw = dp_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
@@ -679,7 +681,8 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
return false;
}
-bool dc_link_decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+bool edp_decide_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw)
{
struct dc_link_settings initial_link_setting;
struct dc_link_settings current_link_setting;
@@ -709,7 +712,7 @@ bool dc_link_decide_edp_link_settings(struct dc_link *link, struct dc_link_setti
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
- link_bw = dc_link_bandwidth_kbps(
+ link_bw = dp_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
@@ -764,7 +767,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
initial_link_setting.use_link_rate_set = false;
initial_link_setting.link_rate_set = 0;
current_link_setting = initial_link_setting;
- if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap))
return false;
/* search for the minimum link setting that:
@@ -773,7 +776,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
*/
while (current_link_setting.link_rate <=
max_link_rate) {
- link_bw = dc_link_bandwidth_kbps(
+ link_bw = dp_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
@@ -830,7 +833,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
*/
while (current_link_setting.link_rate <=
max_link_rate) {
- link_bw = dc_link_bandwidth_kbps(
+ link_bw = dp_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
@@ -922,12 +925,12 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
tmp_timing.flags.DSC = 0;
orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
- dc_link_decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
+ edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw);
max_link_rate = tmp_link_setting.link_rate;
}
decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate);
} else {
- dc_link_decide_edp_link_settings(link, link_setting, req_bw);
+ edp_decide_link_settings(link, link_setting, req_bw);
}
} else {
decide_dp_link_settings(link, link_setting, req_bw);
@@ -948,7 +951,7 @@ enum dp_link_encoding link_dp_get_encoding_format(const struct dc_link_settings
return DP_UNKNOWN_ENCODING;
}
-enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link)
+enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link)
{
struct dc_link_settings link_settings = {0};
@@ -1005,7 +1008,7 @@ static enum dc_status wake_up_aux_channel(struct dc_link *link)
* signal and may need up to 1 ms before being able to reply.
*/
if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
- udelay(1000);
+ fsleep(1000);
aux_channel_retry_cnt++;
}
}
@@ -1121,7 +1124,7 @@ static void get_active_converter_info(
union hdmi_encoded_link_bw hdmi_encoded_link_bw;
link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
- dc_link_bw_kbps_from_raw_frl_link_rate_data(
+ link_bw_kbps_from_raw_frl_link_rate_data(
hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
// Intersect reported max link bw support with the supported link rate post FRL link training
@@ -1216,7 +1219,7 @@ static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
dp_disable_link_phy(link, &link_res, link->connector_signal);
}
-static bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
{
uint8_t dpcd_data[16];
uint32_t read_dpcd_retry_cnt = 3;
@@ -1278,12 +1281,6 @@ static bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
return true;
}
-void dc_link_overwrite_extended_receiver_cap(
- struct dc_link *link)
-{
- dp_overwrite_extended_receiver_cap(link);
-}
-
void dpcd_set_source_specific_data(struct dc_link *link)
{
if (!link->dc->vendor_signature.is_valid) {
@@ -1972,7 +1969,7 @@ void detect_edp_sink_caps(struct dc_link *link)
sizeof(link->dpcd_caps.alpm_caps.raw));
}
-bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
+bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
{
struct link_encoder *link_enc = NULL;
@@ -1995,7 +1992,7 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_
return false;
}
-const struct dc_link_settings *dc_link_get_link_cap(
+const struct dc_link_settings *dp_get_verified_link_cap(
const struct dc_link *link)
{
if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
@@ -2121,9 +2118,9 @@ static bool dp_verify_link_cap(
if (status == LINK_TRAINING_SUCCESS) {
success = true;
- udelay(1000);
- if (dc_link_dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
- dc_link_check_link_loss_status(
+ fsleep(1000);
+ if (dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
+ dp_parse_link_loss_status(
link,
&irq_data))
(*fail_count)++;
@@ -2163,7 +2160,7 @@ bool dp_verify_link_cap_with_retries(
memset(&link->verified_link_cap, 0,
sizeof(struct dc_link_settings));
- if (!dc_link_detect_connection_type(link, &type) || type == dc_connection_none) {
+ if (!link_detect_connection_type(link, &type) || type == dc_connection_none) {
link->verified_link_cap = fail_safe_link_settings;
break;
} else if (dp_verify_link_cap(link, known_limit_link_setting,
@@ -2171,7 +2168,7 @@ bool dp_verify_link_cap_with_retries(
success = true;
break;
}
- msleep(10);
+ fsleep(10 * 1000);
}
dp_trace_lt_fail_count_update(link, fail_count, true);
@@ -2181,10 +2178,9 @@ bool dp_verify_link_cap_with_retries(
}
/**
- * dc_link_is_dp_sink_present() - Check if there is a native DP
- * or passive DP-HDMI dongle connected
+ * dp_is_sink_present() - Check if there is a native DP
+ * or passive DP-HDMI dongle connected
*/
-bool dc_link_is_dp_sink_present(struct dc_link *link)
+bool dp_is_sink_present(struct dc_link *link)
{
enum gpio_result gpio_result;
uint32_t clock_pin = 0;
@@ -2231,7 +2227,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
ASSERT(gpio_result == GPIO_RESULT_OK);
if (clock_pin)
- udelay(1000);
+ fsleep(1000);
else
break;
} while (retry++ < 3);
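The hunk above also converts the 1 ms waits in this poll from udelay() to fsleep(), which is preferable here because detection runs in sleepable context. A minimal sketch of the same bounded-poll idiom, assuming the dal_gpio_* accessors shown above (the helper itself is illustrative, not part of the patch):

static bool poll_pin_released(struct gpio *pin)
{
	uint32_t value = 0;
	uint32_t retry = 0;

	do {
		if (dal_gpio_get_value(pin, &value) != GPIO_RESULT_OK)
			break;		/* read failed; give up */
		if (!value)
			return true;	/* pin went low */
		fsleep(1000);		/* sleep 1 ms instead of busy-waiting */
	} while (retry++ < 3);

	return false;
}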
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index f79e4a4a9db6..8f0ce97f2362 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -34,32 +34,56 @@ void detect_edp_sink_caps(struct dc_link *link);
struct dc_link_settings dp_get_max_link_cap(struct dc_link *link);
+bool dp_get_max_link_enc_cap(const struct dc_link *link,
+ struct dc_link_settings *max_link_enc_cap);
+
+const struct dc_link_settings *dp_get_verified_link_cap(
+ const struct dc_link *link);
+
+enum dp_link_encoding link_dp_get_encoding_format(
+ const struct dc_link_settings *link_settings);
enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
/* Convert PHY repeater count read from DPCD uint8_t. */
uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count);
+bool dp_is_sink_present(struct dc_link *link);
+
bool dp_is_lttpr_present(struct dc_link *link);
+bool dp_is_fec_supported(const struct dc_link *link);
+
bool is_dp_active_dongle(const struct dc_link *link);
bool is_dp_branch_device(const struct dc_link *link);
void dpcd_write_cable_id_to_dprx(struct dc_link *link);
+bool dp_should_enable_fec(const struct dc_link *link);
+
+bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx);
+
/* Initialize output parameter lt_settings. */
void dp_decide_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_setting,
struct link_training_settings *lt_settings);
+bool link_decide_link_settings(
+ struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting);
+
+bool edp_decide_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting, uint32_t req_bw);
bool decide_edp_link_settings_with_dsc(struct dc_link *link,
struct dc_link_settings *link_setting,
uint32_t req_bw,
enum dc_link_rate max_link_rate);
+enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link);
+
void dpcd_set_source_specific_data(struct dc_link *link);
/*query dpcd for version and mst cap addresses*/
@@ -76,4 +100,8 @@ bool dp_verify_link_cap_with_retries(
struct dc_link_settings *known_limit_link_setting,
int attempts);
+uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
+
+bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+
#endif /* __DC_LINK_DP_CAPABILITY_H__ */
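With the dc_link_ prefixes dropped, callers inside the link component use these declarations directly. A hedged caller sketch, assuming only the prototypes above (the wrapper and the acceptance check are illustrative):

static bool pick_stream_link_settings(struct dc_stream_state *stream,
		const struct dc_link *link)
{
	struct dc_link_settings setting = {0};

	if (!link_decide_link_settings(stream, &setting))
		return false;

	/* verified caps bound what link training may attempt */
	return setting.link_rate <=
			dp_get_verified_link_cap(link)->link_rate;
}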
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 32f48a48e9dd..4626fabc0a96 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -26,7 +26,6 @@
#include "dc.h"
#include "inc/core_status.h"
-#include "dc_link.h"
#include "dpcd_defs.h"
#include "link_dp_dpia.h"
@@ -79,7 +78,7 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
return status;
}
-bool dc_link_dpia_query_hpd_status(struct dc_link *link)
+bool dpia_query_hpd_status(struct dc_link *link)
{
union dmub_rb_cmd cmd = {0};
struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index 98935cc10bb7..363f45a1a964 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -37,7 +37,5 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
/* Query hot plug status of USB4 DP tunnel.
* Returns true if HPD high.
*/
-bool dc_link_dpia_query_hpd_status(struct dc_link *link);
-
-
+bool dpia_query_hpd_status(struct dc_link *link);
#endif /* __DC_LINK_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index f69e681b3b5b..931f7c6446de 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -26,12 +26,17 @@
/*********************************************************************/
// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
/*********************************************************************/
-#include "dc.h"
-#include "dc_link.h"
#include "link_dp_dpia_bw.h"
-#include "drm_dp_helper_dc.h"
#include "link_dpcd.h"
+#include "dc_dmub_srv.h"
+#define DC_LOGGER \
+ link->ctx->logger
+
+/* Number of Host Routers per motherboard is 2 */
+#define MAX_HR_NUM 2
+/* Number of DPIA per host router is 2 */
+#define MAX_DPIA_NUM (MAX_HR_NUM * 2)
#define Kbps_TO_Gbps (1000 * 1000)
// ------------------------------------------------------------------
@@ -84,12 +89,11 @@ static int get_estimated_bw(struct dc_link *link)
{
uint8_t bw_estimated_bw = 0;
- if (core_link_read_dpcd(
- link,
- ESTIMATED_BW,
- &bw_estimated_bw,
- sizeof(uint8_t)) != DC_OK)
- dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, ESTIMATED_BW);
+ core_link_read_dpcd(
+ link,
+ ESTIMATED_BW,
+ &bw_estimated_bw,
+ sizeof(uint8_t));
return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
}
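The expression in get_estimated_bw() is the unit conversion used throughout this file: the DPCD ESTIMATED_BW byte counts steps of (1 / bw_granularity) Gbps, and Kbps_TO_Gbps (1000 * 1000) rescales Gbps to kbps. Restated standalone, with an illustrative helper name:

static int dpcd_bw_units_to_kbps(uint8_t units, int bw_granularity)
{
	/* one DPCD unit = (1 / bw_granularity) Gbps = (Kbps_TO_Gbps / bw_granularity) kbps */
	return units * (Kbps_TO_Gbps / bw_granularity);
}

For example, a raw value of 8 with bw_granularity 4 (0.25 Gbps steps) yields 8 * (1000000 / 4) = 2000000 kbps, i.e. 2 Gbps.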
@@ -133,8 +137,9 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
{
const struct dc *dc_struct = link->dc;
uint8_t idx = 0xFF;
+ int i;
- for (int i = 0; i < MAX_PIPES * 2; ++i) {
+ for (i = 0; i < MAX_PIPES * 2; ++i) {
if (!dc_struct->links[i] ||
dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
@@ -161,8 +166,9 @@ static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
struct dc_link *link_temp;
int total_bw = 0;
+ int i;
- for (int i = 0; i < MAX_PIPES * 2; ++i) {
+ for (i = 0; i < MAX_PIPES * 2; ++i) {
if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
@@ -194,15 +200,13 @@ static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
*/
static bool dpia_bw_alloc_unplug(struct dc_link *link)
{
- bool ret = false;
-
if (!link)
return true;
return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
link->dpia_bw_alloc_config.sink_allocated_bw, link);
}
-static void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
{
uint8_t requested_bw;
uint32_t temp;
@@ -227,9 +231,7 @@ static void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw)
link,
REQUESTED_BW,
&requested_bw,
- sizeof(uint8_t)) != DC_OK)
- dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, REQUESTED_BW);
- else
+ sizeof(uint8_t)) == DC_OK)
link->dpia_bw_alloc_config.response_ready = false; // Reset flag
}
/*
@@ -246,7 +248,7 @@ static bool get_cm_response_ready_flag(struct dc_link *link)
// ------------------------------------------------------------------
// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
-bool set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
{
bool ret = false;
uint8_t response = 0,
@@ -257,22 +259,18 @@ bool set_dptx_usb4_bw_alloc_support(struct dc_link *link)
goto out;
if (core_link_read_dpcd(
- link,
- DP_TUNNELING_CAPABILITIES,
- &response,
- sizeof(uint8_t)) != DC_OK)
- dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, DP_TUNNELING_CAPABILITIES);
-
- bw_support_dpia = (response >> 7) & 1;
+ link,
+ DP_TUNNELING_CAPABILITIES,
+ &response,
+ sizeof(uint8_t)) == DC_OK)
+ bw_support_dpia = (response >> 7) & 1;
if (core_link_read_dpcd(
link,
USB4_DRIVER_BW_CAPABILITY,
&response,
- sizeof(uint8_t)) != DC_OK)
- dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, DP_TUNNELING_CAPABILITIES);
-
- bw_support_cm = (response >> 7) & 1;
+ sizeof(uint8_t)) == DC_OK)
+ bw_support_cm = (response >> 7) & 1;
/* Send request acknowledgment to turn on DPTX support */
if (bw_support_cm && bw_support_dpia) {
@@ -282,15 +280,14 @@ bool set_dptx_usb4_bw_alloc_support(struct dc_link *link)
link,
DPTX_BW_ALLOCATION_MODE_CONTROL,
&response,
- sizeof(uint8_t)) != DC_OK)
- dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n",
- "**** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
- __func__, DP_TUNNELING_CAPABILITIES);
- else {
-
+ sizeof(uint8_t)) != DC_OK) {
+ DC_LOG_DEBUG("%s: **** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
+ __func__);
+ } else {
// SUCCESS Enabled DPtx BW Allocation Mode Support
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
- dm_output_to_console("**** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n");
+ DC_LOG_DEBUG("%s: **** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n",
+ __func__);
ret = true;
init_usb4_bw_struct(link);
@@ -300,8 +297,12 @@ bool set_dptx_usb4_bw_alloc_support(struct dc_link *link)
out:
return ret;
}
-void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result)
+void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
{
+ int bw_needed = 0;
+ int estimated = 0;
+ int host_router_total_estimated_bw = 0;
+
if (!get_bw_alloc_proceed_flag((link)))
return;
@@ -309,13 +310,13 @@ void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t resu
case DPIA_BW_REQ_FAILED:
- dm_output_to_console("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
+ DC_LOG_DEBUG("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
// Update the new Estimated BW value updated by CM
link->dpia_bw_alloc_config.estimated_bw =
bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- dc_link_set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
+ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
link->dpia_bw_alloc_config.response_ready = false;
/*
@@ -329,18 +330,18 @@ void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t resu
case DPIA_BW_REQ_SUCCESS:
- dm_output_to_console("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
+ DC_LOG_DEBUG("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
// 1. SUCCESS 1st time before any Pruning is done
// 2. SUCCESS after prev. FAIL before any Pruning is done
// 3. SUCCESS after Pruning is done but before enabling link
- int needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
// 1.
if (!link->dpia_bw_alloc_config.sink_allocated_bw) {
- allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, needed, link);
+ allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed, link);
link->dpia_bw_alloc_config.sink_verified_bw =
link->dpia_bw_alloc_config.sink_allocated_bw;
@@ -354,12 +355,12 @@ void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t resu
else if (link->dpia_bw_alloc_config.sink_allocated_bw) {
// Find out how much we need to de-allocate
- if (link->dpia_bw_alloc_config.sink_allocated_bw > needed)
+ if (link->dpia_bw_alloc_config.sink_allocated_bw > bw_needed)
deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
- link->dpia_bw_alloc_config.sink_allocated_bw - needed, link);
+ link->dpia_bw_alloc_config.sink_allocated_bw - bw_needed, link);
else
allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
- needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
+ bw_needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
}
// 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA
@@ -370,27 +371,20 @@ void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t resu
case DPIA_EST_BW_CHANGED:
- dm_output_to_console("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
+ DC_LOG_DEBUG("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
- int available = 0, estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- int host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
+ estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
// 1. If due to unplug of other sink
if (estimated == host_router_total_estimated_bw) {
-
// First update the estimated & max_bw fields
if (link->dpia_bw_alloc_config.estimated_bw < estimated) {
- available = estimated - link->dpia_bw_alloc_config.estimated_bw;
link->dpia_bw_alloc_config.estimated_bw = estimated;
}
}
// 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw
else {
-
- // We took from another unplugged/problematic sink to give to us
- if (link->dpia_bw_alloc_config.estimated_bw < estimated)
- available = estimated - link->dpia_bw_alloc_config.estimated_bw;
-
// We lost estimated bw usually due to plug event of other dpia
link->dpia_bw_alloc_config.estimated_bw = estimated;
}
@@ -398,12 +392,12 @@ void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t resu
case DPIA_BW_ALLOC_CAPS_CHANGED:
- dm_output_to_console("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
+ DC_LOG_DEBUG("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
link->dpia_bw_alloc_config.bw_alloc_enabled = false;
break;
}
}
-int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
+int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
{
int ret = 0;
uint8_t timeout = 10;
@@ -417,14 +411,14 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *li
// If DP over USB4 then we need to check BW allocation
link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
- dc_link_set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
+ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
do {
- if (!timeout > 0)
+		if (timeout > 0)
timeout--;
else
break;
- udelay(10 * 1000);
+ fsleep(10 * 1000);
} while (!get_cm_response_ready_flag(link));
if (!timeout)
@@ -439,3 +433,65 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *li
out:
return ret;
}
+int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
+{
+ int ret = 0;
+ uint8_t timeout = 10;
+
+ if (!get_bw_alloc_proceed_flag(link))
+ goto out;
+
+ /*
+	 * Sometimes a stream uses the same timing parameters as the already
+	 * allocated max sink bw, so there is no need to re-allocate
+ */
+ if (req_bw != link->dpia_bw_alloc_config.sink_allocated_bw) {
+ set_usb4_req_bw_req(link, req_bw);
+ do {
+			if (timeout > 0)
+				timeout--;
+			else
+				break;
+			fsleep(10 * 1000);
+ } while (!get_cm_response_ready_flag(link));
+
+ if (!timeout)
+			ret = 0; // ERROR: timeout waiting for response for allocating bw
+ else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+ ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+ }
+
+out:
+ return ret;
+}
+bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, uint8_t num_dpias)
+{
+ bool ret = true;
+ int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
+ uint8_t lowest_dpia_index = 0, dpia_index = 0;
+ uint8_t i;
+
+ if (!num_dpias || num_dpias > MAX_DPIA_NUM)
+ return ret;
+
+	// Get total Host Router BW & validate against each Host Router max BW
+ for (i = 0; i < num_dpias; ++i) {
+
+ if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
+ continue;
+
+ lowest_dpia_index = get_lowest_dpia_index(link[i]);
+ if (link[i]->link_index < lowest_dpia_index)
+ continue;
+
+ dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
+ bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
+ if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
+
+ ret = false;
+ break;
+ }
+ }
+
+ return ret;
+}
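Both allocation entry points above poll get_cm_response_ready_flag() on a 10-iteration, 10 ms budget. With the countdown condition fixed as above, the idiom reads as follows when factored out (the wrapper is illustrative; the helper and timings are from this file):

static bool wait_for_cm_response(struct dc_link *link)
{
	uint8_t timeout = 10;

	do {
		if (timeout > 0)
			timeout--;
		else
			break;		/* budget exhausted */
		fsleep(10 * 1000);	/* 10 ms per try, ~100 ms total */
	} while (!get_cm_response_ready_flag(link));

	return timeout != 0;
}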
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index c2c3049adcd1..382616c8b698 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -26,6 +26,8 @@
#ifndef DC_INC_LINK_DP_DPIA_BW_H_
#define DC_INC_LINK_DP_DPIA_BW_H_
+#include "link.h"
+
/*
* Host Router BW type
*/
@@ -42,6 +44,54 @@ enum bw_type {
*
* return: SUCCESS or FAILURE
*/
-bool set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+
+/*
+ * Allocates only what the stream needs for bw:
+ * if stream_req_bw differs from already_allocated_bw_at_HPD,
+ * deallocate the max bw and then allocate only what the stream needs
+ *
+ * @link: pointer to the dc_link struct instance
+ * @req_bw: Bw requested by the stream
+ *
+ * return: allocated bw else return 0
+ */
+int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
+
+/*
+ * Handle the USB4 BW Allocation related functionality here:
+ * Plug => Try to allocate max bw from timing parameters supported by the sink
+ * Unplug => de-allocate bw
+ *
+ * @link: pointer to the dc_link struct instance
+ * @peak_bw: Peak bw used by the link/sink
+ *
+ * return: allocated bw else return 0
+ */
+int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
+
+/*
+ * Handler for when the status of the request above is complete.
+ * Reads the result of the allocation from the CM and updates the structs.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @bw: Allocated or Estimated BW depending on the result
+ * @result: Response type
+ *
+ * return: none
+ */
+void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result);
+
+/*
+ * Handle the validation of total BW here and confirm that the bw used by each
+ * DPIA doesn't exceed available BW for each host router (HR)
+ *
+ * @link[]: array of link pointer to all possible DPIA links
+ * @bw_needed[]: bw needed for each DPIA link based on timing
+ * @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM
+ *
+ * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ */
+bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, uint8_t num_dpias);
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
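Taken together, the comments above describe a plug-time sequence. A hedged end-to-end sketch, assuming a DM-side caller (the wrapper name and bandwidth arguments are hypothetical; the call order follows the descriptions above):

static void dm_dpia_plug_sketch(struct dc_link *link, int peak_bw, int req_bw)
{
	/* negotiate BW-allocation mode with DPTX and the CM first */
	if (!link_dp_dpia_set_dptx_usb4_bw_alloc_support(link))
		return;

	/* at HPD, try to allocate the sink's peak bandwidth ... */
	if (dpia_handle_usb4_bandwidth_allocation_for_link(link, peak_bw) <= 0)
		return;

	/* ... then trim the allocation to what the stream actually needs */
	link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw);
}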
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 9d80427520cf..ba95facc4ee8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -33,13 +33,14 @@
#include "link_dpcd.h"
#include "link_dp_training.h"
#include "link_dp_capability.h"
+#include "link_edp_panel_control.h"
#include "link/accessories/link_dp_trace.h"
#include "link/link_dpms.h"
#include "dm_helpers.h"
#define DC_LOGGER_INIT(logger)
-bool dc_link_check_link_loss_status(
+bool dp_parse_link_loss_status(
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data)
{
@@ -155,9 +156,9 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
/* PSR error, disable and re-enable PSR */
if (link->psr_settings.psr_allow_active) {
allow_active = false;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
allow_active = true;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
}
return true;
@@ -174,7 +175,7 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
return false;
}
-void dc_link_dp_handle_link_loss(struct dc_link *link)
+void dp_handle_link_loss(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
@@ -200,7 +201,7 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
}
}
-enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+enum dc_status dp_read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data)
{
@@ -247,7 +248,7 @@ enum dc_status dc_link_dp_read_hpd_rx_irq_data(
}
/*************************Short Pulse IRQ***************************/
-bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
+bool dp_should_allow_hpd_rx_irq(const struct dc_link *link)
{
/*
* Don't handle RX IRQ unless one of the following is met:
@@ -262,8 +263,9 @@ bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
return false;
}
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
- bool defer_handling, bool *has_left_work)
+bool dp_handle_hpd_rx_irq(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work)
{
union hpd_irq_data hpd_irq_dpcd_data = {0};
union device_service_irq device_service_clear = {0};
@@ -288,7 +290,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
* dal_dpsst_ls_read_hpd_irq_data
* Order of calls is important too
*/
- result = dc_link_dp_read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
+ result = dp_read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
if (out_hpd_irq_dpcd_data)
*out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
@@ -315,7 +317,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
return false;
}
- if (!dc_link_dp_allow_hpd_rx_irq(link)) {
+ if (!dp_should_allow_hpd_rx_irq(link)) {
DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
__func__, link->link_index);
return false;
@@ -348,9 +350,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
* then DM should call DC to do the detection.
* NOTE: Do not handle link loss on eDP since it is an internal link */
if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
- dc_link_check_link_loss_status(
- link,
- &hpd_irq_dpcd_data)) {
+ dp_parse_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
/* Connectivity log: link loss */
CONN_DATA_LINK_LOSS(link,
hpd_irq_dpcd_data.raw,
@@ -360,7 +362,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
if (defer_handling && has_left_work)
*has_left_work = true;
else
- dc_link_dp_handle_link_loss(link);
+ dp_handle_link_loss(link);
status = false;
if (out_link_loss)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
index 39b2e51ea79d..ac33730fedd4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
@@ -27,5 +27,15 @@
#define __DC_LINK_DP_IRQ_HANDLER_H__
#include "link.h"
-
+bool dp_parse_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+bool dp_should_allow_hpd_rx_irq(const struct dc_link *link);
+void dp_handle_link_loss(struct dc_link *link);
+enum dc_status dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data);
+bool dp_handle_hpd_rx_irq(struct dc_link *link,
+ union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
#endif /* __DC_LINK_DP_IRQ_HANDLER_H__ */
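For orientation, a hedged sketch of the short-pulse path these prototypes support, following the call order in link_dp_irq_handler.c above (the wrapper is illustrative; dp_handle_hpd_rx_irq() is the consolidated entry point that performs the same steps internally):

static void short_pulse_sketch(struct dc_link *link)
{
	union hpd_irq_data irq_data = {0};

	if (!dp_should_allow_hpd_rx_irq(link))
		return;		/* RX IRQ handling not applicable for this link state */

	if (dp_read_hpd_rx_irq_data(link, &irq_data) != DC_OK)
		return;

	if (dp_parse_link_loss_status(link, &irq_data))
		dp_handle_link_loss(link);	/* retrain active streams */
}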
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index cd9fb8126bcf..b7abba55bc2f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -40,7 +40,7 @@
#define DC_LOGGER \
link->ctx->logger
-void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on)
+void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on)
{
uint8_t state;
@@ -64,7 +64,7 @@ void dp_enable_link_phy(
link->cur_link_settings = *link_settings;
link->dc->hwss.enable_dp_link_output(link, link_res, signal,
clock_source, link_settings);
- dc_link_dp_receiver_power_ctrl(link, true);
+ dpcd_write_rx_power_ctrl(link, true);
}
void dp_disable_link_phy(struct dc_link *link,
@@ -74,7 +74,7 @@ void dp_disable_link_phy(struct dc_link *link,
struct dc *dc = link->ctx->dc;
if (!link->wa_flags.dp_keep_receiver_powered)
- dc_link_dp_receiver_power_ctrl(link, false);
+ dpcd_write_rx_power_ctrl(link, false);
dc->hwss.disable_link_output(link, link_res, signal);
/* Clear current link setting.*/
@@ -143,7 +143,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
- if (!dc_link_should_enable_fec(link))
+ if (!dp_should_enable_fec(link))
return status;
if (link_enc->funcs->fec_set_ready &&
@@ -183,7 +183,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
- if (!dc_link_should_enable_fec(link))
+ if (!dp_should_enable_fec(link))
return;
if (link_enc->funcs->fec_set_enable &&
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
index dba1f29df319..1eb0619d6710 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -51,6 +51,9 @@ void dp_set_drive_settings(
enum dc_status dp_set_fec_ready(struct dc_link *link,
const struct link_resource *link_res, bool ready);
+
void dp_set_fec_enable(struct dc_link *link, bool enable);
+void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on);
+
#endif /* __DC_LINK_DP_PHY_H__ */
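dp_set_fec_ready() and dp_set_fec_enable() above both gate on dp_should_enable_fec(), so callers can invoke them unconditionally. A minimal ordering sketch, assuming FEC readiness is set before link training and FEC is enabled after it succeeds (the wrapper is illustrative):

static enum dc_status fec_bringup_sketch(struct dc_link *link,
		const struct link_resource *link_res)
{
	enum dc_status status;

	status = dp_set_fec_ready(link, link_res, true);
	if (status != DC_OK)
		return status;

	/* ... link training would run here ... */

	dp_set_fec_enable(link, true);
	return DC_OK;
}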
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index b48d4d822991..a9025671ee4a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -41,6 +41,8 @@
#include "link_dp_phy.h"
#include "link_dp_capability.h"
#include "link_edp_panel_control.h"
+#include "link/link_detection.h"
+#include "link/link_validation.h"
#include "atomfirmware.h"
#include "link_enc_cfg.h"
#include "resource.h"
@@ -258,10 +260,7 @@ void dp_wait_for_training_aux_rd_interval(
struct dc_link *link,
uint32_t wait_in_micro_secs)
{
- if (wait_in_micro_secs > 1000)
- msleep(wait_in_micro_secs/1000);
- else
- udelay(wait_in_micro_secs);
+ fsleep(wait_in_micro_secs);
DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
__func__,
@@ -725,12 +724,10 @@ void override_training_settings(
if (link->preferred_training_settings.fec_enable != NULL)
lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Check DP tunnel LTTPR mode debug option. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr)
lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-#endif
dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
}
@@ -780,7 +777,7 @@ enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
return pattern;
}
-enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link,
struct dc_link_settings *link_setting)
{
enum dp_link_encoding encoding = link_dp_get_encoding_format(link_setting);
@@ -865,8 +862,9 @@ static enum dc_status configure_lttpr_mode_non_transparent(
uint8_t repeater_id;
enum dc_status result = DC_ERROR_UNEXPECTED;
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+ const struct dc *dc = link->dc;
- enum dp_link_encoding encoding = link_dp_get_encoding_format(&lt_settings->link_settings);
+ enum dp_link_encoding encoding = dc->link_srv->dp_get_encoding_format(&lt_settings->link_settings);
if (encoding == DP_8b_10b_ENCODING) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
@@ -970,7 +968,7 @@ static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding
if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
break;
- udelay(1000);
+ fsleep(1000);
}
}
}
@@ -1573,7 +1571,6 @@ bool perform_link_training_with_retries(
msleep(delay_dp_power_up_in_ms);
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
if (panel_mode == DP_PANEL_MODE_EDP) {
struct cp_psp *cp_psp = &stream->ctx->cp_psp;
@@ -1587,17 +1584,16 @@ bool perform_link_training_with_retries(
result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
}
}
-#endif
dp_set_panel_mode(link, panel_mode);
if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
+ dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
return true;
} else {
/** @todo Consolidate USB4 DP and DPx.x training. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dc_link_dpia_perform_link_training(
+ status = dpia_perform_link_training(
link,
&pipe_ctx->link_res,
&cur_link_settings,
@@ -1649,7 +1645,7 @@ bool perform_link_training_with_retries(
if (status == LINK_TRAINING_ABORT) {
enum dc_connection_type type = dc_connection_none;
- dc_link_detect_connection_type(link, &type);
+ link_detect_connection_type(link, &type);
if (type == dc_connection_none) {
DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
break;
@@ -1682,7 +1678,7 @@ bool perform_link_training_with_retries(
* minimum link bandwidth.
*/
req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
- link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
+ link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings);
is_link_bw_low = (req_bw > link_bw);
is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
(cur_link_settings.lane_count <= LANE_COUNT_ONE));
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index a04948635369..7d027bac8255 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -119,6 +119,9 @@ enum dc_dp_training_pattern decide_cr_training_pattern(
enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
const struct dc_link_settings *link_settings);
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting);
+
void dp_get_lttpr_mode_override(struct dc_link *link,
enum lttpr_mode *override);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
index e50ec5012559..4c6b886a9da8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
@@ -30,7 +30,7 @@
#include "link_dp_phy.h"
#define DC_LOGGER \
link->ctx->logger
-bool dc_link_dp_perform_link_training_skip_aux(
+bool dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct link_resource *link_res,
const struct dc_link_settings *link_setting)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
index 413999cd03c4..546387a5f32d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
@@ -28,7 +28,7 @@
#define __DC_LINK_DP_TRAINING_AUXLESS_H__
#include "link_dp_training.h"
-bool dc_link_dp_perform_link_training_skip_aux(
+bool dp_perform_link_training_skip_aux(
struct dc_link *link,
const struct link_resource *link_res,
const struct dc_link_settings *link_setting);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index e60da0532c53..ab4aafdb5e5c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -29,7 +29,6 @@
#include "link_dp_training_dpia.h"
#include "dc.h"
#include "inc/core_status.h"
-#include "dc_link.h"
#include "dpcd_defs.h"
#include "link_dp_dpia.h"
@@ -986,7 +985,7 @@ static void dpia_training_abort(
core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
}
-enum link_training_result dc_link_dpia_perform_link_training(
+enum link_training_result dpia_perform_link_training(
struct dc_link *link,
const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
@@ -999,7 +998,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in
- lt_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link_settings);
+ lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings);
/* Configure link as prescribed in link_setting and set LTTPR mode. */
result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
@@ -1035,7 +1034,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
* falling back to lower bandwidth settings possible.
*/
if (result == LINK_TRAINING_SUCCESS) {
- msleep(5);
+ fsleep(5000);
if (!link->is_automated)
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
index 0150f2916421..b39fb9faf1c2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
@@ -32,7 +32,7 @@
* DPIA equivalent of dc_link_dp_perform_link_training.
* Aborts link training upon detection of sink unplug.
*/
-enum link_training_result dc_link_dpia_perform_link_training(
+enum link_training_result dpia_perform_link_training(
struct dc_link *link,
const struct link_resource *link_res,
const struct dc_link_settings *link_setting,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 97e02b5b21ae..93a6bbe954bb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -139,7 +139,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
return DP_PANEL_MODE_DEFAULT;
}
-bool dc_link_set_backlight_level_nits(struct dc_link *link,
+bool edp_set_backlight_level_nits(struct dc_link *link,
bool isHDR,
uint32_t backlight_millinits,
uint32_t transition_time_in_ms)
@@ -171,7 +171,7 @@ bool dc_link_set_backlight_level_nits(struct dc_link *link,
return true;
}
-bool dc_link_get_backlight_level_nits(struct dc_link *link,
+bool edp_get_backlight_level_nits(struct dc_link *link,
uint32_t *backlight_millinits_avg,
uint32_t *backlight_millinits_peak)
{
@@ -201,7 +201,7 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link,
return true;
}
-bool link_backlight_enable_aux(struct dc_link *link, bool enable)
+bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
{
uint8_t backlight_enable = enable ? 1 : 0;
@@ -243,13 +243,13 @@ bool set_default_brightness_aux(struct dc_link *link)
if (default_backlight < 5000 || default_backlight > 5000000)
default_backlight = 150000; //
- return dc_link_set_backlight_level_nits(link, true,
+ return edp_set_backlight_level_nits(link, true,
default_backlight, 0);
}
return false;
}
-bool link_is_edp_ilr_optimization_required(struct dc_link *link,
+bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing)
{
struct dc_link_settings link_setting;
@@ -285,7 +285,7 @@ bool link_is_edp_ilr_optimization_required(struct dc_link *link,
req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
if (!crtc_timing->flags.DSC)
- dc_link_decide_edp_link_settings(link, &link_setting, req_bw);
+ edp_decide_link_settings(link, &link_setting, req_bw);
else
decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);
@@ -299,7 +299,7 @@ bool link_is_edp_ilr_optimization_required(struct dc_link *link,
return false;
}
-void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
+void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
{
if (link->connector_signal != SIGNAL_TYPE_EDP)
return;
@@ -311,7 +311,7 @@ void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hp
link->dc->hwss.edp_backlight_control(link, true);
}
-bool dc_link_wait_for_t12(struct dc_link *link)
+bool edp_wait_for_t12(struct dc_link *link)
{
if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
link->dc->hwss.edp_wait_for_T12(link);
@@ -322,13 +322,13 @@ bool dc_link_wait_for_t12(struct dc_link *link)
return false;
}
-void link_edp_add_delay_for_T9(struct dc_link *link)
+void edp_add_delay_for_T9(struct dc_link *link)
{
if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
- udelay(link->panel_config.pps.extra_delay_backlight_off * 1000);
+ fsleep(link->panel_config.pps.extra_delay_backlight_off * 1000);
}
-bool link_edp_receiver_ready_T9(struct dc_link *link)
+bool edp_receiver_ready_T9(struct dc_link *link)
{
unsigned int tries = 0;
unsigned char sinkstatus = 0;
@@ -353,7 +353,7 @@ bool link_edp_receiver_ready_T9(struct dc_link *link)
return result;
}
-bool link_edp_receiver_ready_T7(struct dc_link *link)
+bool edp_receiver_ready_T7(struct dc_link *link)
{
unsigned char sinkstatus = 0;
unsigned char edpRev = 0;
@@ -383,12 +383,12 @@ bool link_edp_receiver_ready_T7(struct dc_link *link)
}
if (link && link->panel_config.pps.extra_t7_ms > 0)
- udelay(link->panel_config.pps.extra_t7_ms * 1000);
+ fsleep(link->panel_config.pps.extra_t7_ms * 1000);
return result;
}
-bool link_power_alpm_dpcd_enable(struct dc_link *link, bool enable)
+bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable)
{
bool ret = false;
union dpcd_alpm_configuration alpm_config;
@@ -422,7 +422,7 @@ static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
return pipe_ctx;
}
-bool dc_link_set_backlight_level(const struct dc_link *link,
+bool edp_set_backlight_level(const struct dc_link *link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp)
{
@@ -453,7 +453,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
return true;
}
-bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+bool edp_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
bool wait, bool force_static, const unsigned int *power_opts)
{
struct dc *dc = link->ctx->dc;
@@ -502,7 +502,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
return true;
}
-bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
+bool edp_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
@@ -557,7 +557,7 @@ transmitter_to_phy_id(struct dc_link *link)
}
}
-bool dc_link_setup_psr(struct dc_link *link,
+bool edp_setup_psr(struct dc_link *link,
const struct dc_stream_state *stream, struct psr_config *psr_config,
struct psr_context *psr_context)
{
@@ -623,7 +623,7 @@ bool dc_link_setup_psr(struct dc_link *link,
sizeof(psr_configuration.raw));
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
- link_power_alpm_dpcd_enable(link, true);
+ edp_power_alpm_dpcd_enable(link, true);
psr_context->su_granularity_required =
psr_config->su_granularity_required;
psr_context->su_y_granularity =
@@ -695,7 +695,6 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->psr_level.u32all = 0;
/*skip power down the single pipe since it blocks the cstate*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (link->ctx->asic_id.chip_family >= FAMILY_RV) {
switch (link->ctx->asic_id.chip_family) {
case FAMILY_YELLOW_CARP:
@@ -709,10 +708,6 @@ bool dc_link_setup_psr(struct dc_link *link,
break;
}
}
-#else
- if (link->ctx->asic_id.chip_family >= FAMILY_RV)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
-#endif
/* SMU will perform additional powerdown sequence.
* For unsupported ASICs, set psr_level flag to skip PSR
@@ -757,7 +752,7 @@ bool dc_link_setup_psr(struct dc_link *link,
}
-void link_get_psr_residency(const struct dc_link *link, uint32_t *residency)
+void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency)
{
struct dc *dc = link->ctx->dc;
struct dmub_psr *psr = dc->res_pool->psr;
@@ -772,7 +767,7 @@ void link_get_psr_residency(const struct dc_link *link, uint32_t *residency)
else
*residency = 0;
}
-bool link_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su)
+bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su)
{
struct dc *dc = link->ctx->dc;
struct dmub_psr *psr = dc->res_pool->psr;
@@ -803,7 +798,7 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
return abm;
}
-int dc_link_get_backlight_level(const struct dc_link *link)
+int edp_get_backlight_level(const struct dc_link *link)
{
struct abm *abm = get_abm_from_stream_res(link);
struct panel_cntl *panel_cntl = link->panel_cntl;
@@ -822,7 +817,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
return DC_ERROR_UNEXPECTED;
}
-int dc_link_get_target_backlight_pwm(const struct dc_link *link)
+int edp_get_target_backlight_pwm(const struct dc_link *link)
{
struct abm *abm = get_abm_from_stream_res(link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 7f91a564b089..28f552080558 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -30,4 +30,34 @@
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
+void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
+int edp_get_backlight_level(const struct dc_link *link);
+bool edp_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak);
+bool edp_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+bool edp_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+int edp_get_target_backlight_pwm(const struct dc_link *link);
+bool edp_get_psr_state(const struct dc_link *link, enum dc_psr_state *state);
+bool edp_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts);
+bool edp_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context);
+bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link,
+ uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
+void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency);
+bool edp_wait_for_t12(struct dc_link *link);
+bool edp_is_ilr_optimization_required(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing);
+bool edp_backlight_enable_aux(struct dc_link *link, bool enable);
+void edp_add_delay_for_T9(struct dc_link *link);
+bool edp_receiver_ready_T9(struct dc_link *link);
+bool edp_receiver_ready_T7(struct dc_link *link);
+bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
#endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */
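The PSR error path earlier in this patch (handle_hpd_irq_psr_sink) recovers by disabling and re-enabling PSR through edp_set_psr_allow_active(). Shown standalone, assuming only the prototype above (the wrapper is illustrative):

static void psr_error_recovery_sketch(struct dc_link *link)
{
	bool allow_active;

	if (!link->psr_settings.psr_allow_active)
		return;

	allow_active = false;	/* force PSR exit */
	edp_set_psr_allow_active(link, &allow_active, true, false, NULL);

	allow_active = true;	/* re-arm PSR */
	edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
}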
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
index 5f39dfe06e9a..e3d729ab5b9f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
@@ -33,18 +33,18 @@
#include "link_hpd.h"
#include "gpio_service_interface.h"
-bool dc_link_get_hpd_state(struct dc_link *dc_link)
+bool link_get_hpd_state(struct dc_link *link)
{
uint32_t state;
- dal_gpio_lock_pin(dc_link->hpd_gpio);
- dal_gpio_get_value(dc_link->hpd_gpio, &state);
- dal_gpio_unlock_pin(dc_link->hpd_gpio);
+ dal_gpio_lock_pin(link->hpd_gpio);
+ dal_gpio_get_value(link->hpd_gpio, &state);
+ dal_gpio_unlock_pin(link->hpd_gpio);
return state;
}
-void dc_link_enable_hpd(const struct dc_link *link)
+void link_enable_hpd(const struct dc_link *link)
{
struct link_encoder *encoder = link->link_enc;
@@ -52,7 +52,7 @@ void dc_link_enable_hpd(const struct dc_link *link)
encoder->funcs->enable_hpd(encoder);
}
-void dc_link_disable_hpd(const struct dc_link *link)
+void link_disable_hpd(const struct dc_link *link)
{
struct link_encoder *encoder = link->link_enc;
@@ -60,7 +60,7 @@ void dc_link_disable_hpd(const struct dc_link *link)
encoder->funcs->disable_hpd(encoder);
}
-void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+void link_enable_hpd_filter(struct dc_link *link, bool enable)
{
struct gpio *hpd;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
index 3d122def0c88..4fb526b264f9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
@@ -44,4 +44,11 @@ bool program_hpd_filter(const struct dc_link *link);
*/
bool dpia_query_hpd_status(struct dc_link *link);
bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high);
+bool link_get_hpd_state(struct dc_link *link);
+struct gpio *link_get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service);
+void link_enable_hpd(const struct dc_link *link);
+void link_disable_hpd(const struct dc_link *link);
+void link_enable_hpd_filter(struct dc_link *link, bool enable);
#endif /* __DC_LINK_HPD_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 6b88ae14f1f9..aad8095660c9 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -53,11 +53,11 @@
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_DC_FP)
#include "amdgpu_dm/dc_fpu.h"
#define DC_FP_START() dc_fpu_begin(__func__, __LINE__)
#define DC_FP_END() dc_fpu_end(__func__, __LINE__)
-#endif
+#endif /* CONFIG_DRM_AMD_DC_FP */
/*
*
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 007d6bdc3e39..3175a4fe4d52 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -410,8 +410,8 @@ union dmub_fw_boot_options {
uint32_t usb4_cm_version: 1; /**< 1 CM support */
uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */
-
- uint32_t reserved : 15; /**< reserved */
+		uint32_t disable_clk_ds: 1; /* 1 if dispclk_ds and dppclk_ds are disallowed */
+ uint32_t reserved : 14; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
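The new disable_clk_ds bit takes one bit from the reserved field (15 -> 14), keeping the union 32 bits wide. A hedged consumer sketch, using only the union shown above (the helper is illustrative; in real code the value would come from a firmware scratch register):

static bool clk_deep_sleep_allowed(union dmub_fw_boot_options boot_options)
{
	/* 1 means dispclk_ds and dppclk_ds are disallowed */
	return boot_options.bits.disable_clk_ds == 0;
}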
@@ -1971,7 +1971,7 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2029,7 +2029,7 @@ struct dmub_cmd_psr_set_level_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2056,7 +2056,7 @@ struct dmub_rb_cmd_psr_enable_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2100,7 +2100,7 @@ struct dmub_cmd_psr_set_version_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2131,7 +2131,7 @@ struct dmub_cmd_psr_force_static_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2206,7 +2206,7 @@ struct dmub_cmd_update_dirty_rect_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2344,7 +2344,7 @@ struct dmub_cmd_update_cursor_payload0 {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2391,7 +2391,7 @@ struct dmub_cmd_psr_set_vtotal_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
@@ -2429,7 +2429,7 @@ struct dmub_cmd_psr_set_power_opt_data {
uint8_t cmd_version;
/**
* Panel Instance.
- * Panel isntance to identify which psr_state to use
+ * Panel instance to identify which psr_state to use
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h
index 42229b4effdc..42229b4effdc 100644
--- a/drivers/gpu/drm/amd/display/include/hdcp_types.h
+++ b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 18b9173d5a96..cd870af5fd25 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -34,10 +34,6 @@
struct ddc;
struct irq_manager;
-enum {
- MAX_CONTROLLER_NUM = 6
-};
-
enum dp_power_state {
DP_POWER_STATE_D0 = 1,
DP_POWER_STATE_D3
@@ -60,28 +56,6 @@ enum {
DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */
};
-enum link_training_result {
- LINK_TRAINING_SUCCESS,
- LINK_TRAINING_CR_FAIL_LANE0,
- LINK_TRAINING_CR_FAIL_LANE1,
- LINK_TRAINING_CR_FAIL_LANE23,
- /* CR DONE bit is cleared during EQ step */
- LINK_TRAINING_EQ_FAIL_CR,
- /* CR DONE bit is cleared but LANE0_CR_DONE is set during EQ step */
- LINK_TRAINING_EQ_FAIL_CR_PARTIAL,
- /* other failure during EQ step */
- LINK_TRAINING_EQ_FAIL_EQ,
- LINK_TRAINING_LQA_FAIL,
- /* one of the CR,EQ or symbol lock is dropped */
- LINK_TRAINING_LINK_LOSS,
- /* Abort link training (because sink unplugged) */
- LINK_TRAINING_ABORT,
- DP_128b_132b_LT_FAILED,
- DP_128b_132b_MAX_LOOP_COUNT_REACHED,
- DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT,
- DP_128b_132b_CDS_DONE_TIMEOUT,
-};
-
enum lttpr_mode {
LTTPR_MODE_UNKNOWN,
LTTPR_MODE_NON_LTTPR,
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 2be45b314922..315da61ee897 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -955,26 +955,20 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
* Check if Freesync is supported. Return if false. If true,
* set the corresponding bit in the info packet
*/
- bool freesync_on_desktop;
- bool fams_enable;
-
- fams_enable = stream->ctx->dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
- freesync_on_desktop = stream->freesync_on_desktop && fams_enable;
-
if (!vrr->send_info_frame)
return;
switch (packet_type) {
case PACKET_TYPE_FS_V3:
- build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket, freesync_on_desktop);
+ build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
break;
case PACKET_TYPE_FS_V2:
- build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, freesync_on_desktop);
+ build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
break;
case PACKET_TYPE_VRR:
case PACKET_TYPE_FS_V1:
default:
- build_vrr_infopacket_v1(stream->signal, vrr, infopacket, freesync_on_desktop);
+ build_vrr_infopacket_v1(stream->signal, vrr, infopacket, stream->freesync_on_desktop);
}
if (true == pack_sdp_v1_3 &&
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index eb6f9b9c504a..c62df3bcc7cb 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -26,13 +26,11 @@
#ifndef MOD_HDCP_LOG_H_
#define MOD_HDCP_LOG_H_
-#ifdef CONFIG_DRM_AMD_DC_HDCP
#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
-#endif
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index 3348bb97ef81..a4d344a4db9e 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -104,6 +104,7 @@ struct mod_hdcp_displayport {
uint8_t rev;
uint8_t assr_enabled;
uint8_t mst_enabled;
+ uint8_t dp2_enabled;
uint8_t usb4_enabled;
};
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index e39b133d05af..fa469de3e935 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -678,13 +678,8 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
bool result = false;
uint32_t i, j = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (res_pool->abm == NULL && res_pool->multiple_abms[inst] == NULL)
return false;
-#else
- if (res_pool->abm == NULL)
- return false;
-#endif
memset(&ram_table, 0, sizeof(ram_table));
memset(&config, 0, sizeof(config));
@@ -737,12 +732,10 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
config.min_abm_backlight = ram_table.min_abm_backlight;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (res_pool->multiple_abms[inst]) {
result = res_pool->multiple_abms[inst]->funcs->init_abm_config(
res_pool->multiple_abms[inst], (char *)(&config), sizeof(struct abm_config_table), inst);
} else
-#endif
result = res_pool->abm->funcs->init_abm_config(
res_pool->abm, (char *)(&config), sizeof(struct abm_config_table), 0);
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
index 18d34bbceebe..79c41004c0b6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
@@ -4868,6 +4868,10 @@
#define mmCP_ME2_PIPE2_INT_STATUS_BASE_IDX 0
#define mmCP_ME2_PIPE3_INT_STATUS 0x1e34
#define mmCP_ME2_PIPE3_INT_STATUS_BASE_IDX 0
+#define mmCP_ME1_INT_STAT_DEBUG 0x1e35
+#define mmCP_ME1_INT_STAT_DEBUG_BASE_IDX 0
+#define mmCP_ME2_INT_STAT_DEBUG 0x1e36
+#define mmCP_ME2_INT_STAT_DEBUG_BASE_IDX 0
#define mmCP_GFX_QUEUE_INDEX 0x1e37
#define mmCP_GFX_QUEUE_INDEX_BASE_IDX 0
#define mmCC_GC_EDC_CONFIG 0x1e38
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
index 4127896ffcdf..52043e143067 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
@@ -18680,6 +18680,60 @@
//CC_GC_EDC_CONFIG
#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
//CP_ME1_PIPE_PRIORITY_CNTS
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
index 3973110f149c..a734abaa91a5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
@@ -4531,6 +4531,10 @@
#define mmCP_GFX_QUEUE_INDEX_BASE_IDX 0
#define mmCC_GC_EDC_CONFIG 0x1e38
#define mmCC_GC_EDC_CONFIG_BASE_IDX 0
+#define mmCP_ME1_INT_STAT_DEBUG 0x1e35
+#define mmCP_ME1_INT_STAT_DEBUG_BASE_IDX 0
+#define mmCP_ME2_INT_STAT_DEBUG 0x1e36
+#define mmCP_ME2_INT_STAT_DEBUG_BASE_IDX 0
#define mmCP_ME1_PIPE_PRIORITY_CNTS 0x1e39
#define mmCP_ME1_PIPE_PRIORITY_CNTS_BASE_IDX 0
#define mmCP_ME1_PIPE0_PRIORITY 0x1e3a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index d4e8ff22ecb8..d7a17bae2584 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -17028,6 +17028,60 @@
//CC_GC_EDC_CONFIG
#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
//CP_ME1_PIPE_PRIORITY_CNTS
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h
new file mode 100644
index 000000000000..546b043ccdf5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _hdp_4_4_2_OFFSET_HEADER
+#define _hdp_4_4_2_OFFSET_HEADER
+
+
+
+// addressBlock: aid_hdp_hdpdec
+// base address: 0x3c80
+#define regHDP_MMHUB_TLVL 0x0000
+#define regHDP_MMHUB_TLVL_BASE_IDX 0
+#define regHDP_MMHUB_UNITID 0x0001
+#define regHDP_MMHUB_UNITID_BASE_IDX 0
+#define regHDP_NONSURFACE_BASE 0x0040
+#define regHDP_NONSURFACE_BASE_BASE_IDX 0
+#define regHDP_NONSURFACE_INFO 0x0041
+#define regHDP_NONSURFACE_INFO_BASE_IDX 0
+#define regHDP_NONSURFACE_BASE_HI 0x0042
+#define regHDP_NONSURFACE_BASE_HI_BASE_IDX 0
+#define regHDP_SURFACE_WRITE_FLAGS 0x00c4
+#define regHDP_SURFACE_WRITE_FLAGS_BASE_IDX 0
+#define regHDP_SURFACE_READ_FLAGS 0x00c5
+#define regHDP_SURFACE_READ_FLAGS_BASE_IDX 0
+#define regHDP_SURFACE_WRITE_FLAGS_CLR 0x00c6
+#define regHDP_SURFACE_WRITE_FLAGS_CLR_BASE_IDX 0
+#define regHDP_SURFACE_READ_FLAGS_CLR 0x00c7
+#define regHDP_SURFACE_READ_FLAGS_CLR_BASE_IDX 0
+#define regHDP_NONSURF_FLAGS 0x00c8
+#define regHDP_NONSURF_FLAGS_BASE_IDX 0
+#define regHDP_NONSURF_FLAGS_CLR 0x00c9
+#define regHDP_NONSURF_FLAGS_CLR_BASE_IDX 0
+#define regHDP_HOST_PATH_CNTL 0x00cc
+#define regHDP_HOST_PATH_CNTL_BASE_IDX 0
+#define regHDP_SW_SEMAPHORE 0x00cd
+#define regHDP_SW_SEMAPHORE_BASE_IDX 0
+#define regHDP_DEBUG0 0x00ce
+#define regHDP_DEBUG0_BASE_IDX 0
+#define regHDP_LAST_SURFACE_HIT 0x00d0
+#define regHDP_LAST_SURFACE_HIT_BASE_IDX 0
+#define regHDP_OUTSTANDING_REQ 0x00d2
+#define regHDP_OUTSTANDING_REQ_BASE_IDX 0
+#define regHDP_MISC_CNTL 0x00d3
+#define regHDP_MISC_CNTL_BASE_IDX 0
+#define regHDP_MEM_POWER_CTRL 0x00d4
+#define regHDP_MEM_POWER_CTRL_BASE_IDX 0
+#define regHDP_MMHUB_CNTL 0x00d5
+#define regHDP_MMHUB_CNTL_BASE_IDX 0
+#define regHDP_EDC_CNT 0x00d6
+#define regHDP_EDC_CNT_BASE_IDX 0
+#define regHDP_VERSION 0x00d7
+#define regHDP_VERSION_BASE_IDX 0
+#define regHDP_CLK_CNTL 0x00d8
+#define regHDP_CLK_CNTL_BASE_IDX 0
+#define regHDP_MEMIO_CNTL 0x00f6
+#define regHDP_MEMIO_CNTL_BASE_IDX 0
+#define regHDP_MEMIO_ADDR 0x00f7
+#define regHDP_MEMIO_ADDR_BASE_IDX 0
+#define regHDP_MEMIO_STATUS 0x00f8
+#define regHDP_MEMIO_STATUS_BASE_IDX 0
+#define regHDP_MEMIO_WR_DATA 0x00f9
+#define regHDP_MEMIO_WR_DATA_BASE_IDX 0
+#define regHDP_MEMIO_RD_DATA 0x00fa
+#define regHDP_MEMIO_RD_DATA_BASE_IDX 0
+#define regHDP_XDP_DIRECT2HDP_FIRST 0x0100
+#define regHDP_XDP_DIRECT2HDP_FIRST_BASE_IDX 0
+#define regHDP_XDP_D2H_FLUSH 0x0101
+#define regHDP_XDP_D2H_FLUSH_BASE_IDX 0
+#define regHDP_XDP_D2H_BAR_UPDATE 0x0102
+#define regHDP_XDP_D2H_BAR_UPDATE_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_3 0x0103
+#define regHDP_XDP_D2H_RSVD_3_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_4 0x0104
+#define regHDP_XDP_D2H_RSVD_4_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_5 0x0105
+#define regHDP_XDP_D2H_RSVD_5_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_6 0x0106
+#define regHDP_XDP_D2H_RSVD_6_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_7 0x0107
+#define regHDP_XDP_D2H_RSVD_7_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_8 0x0108
+#define regHDP_XDP_D2H_RSVD_8_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_9 0x0109
+#define regHDP_XDP_D2H_RSVD_9_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_10 0x010a
+#define regHDP_XDP_D2H_RSVD_10_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_11 0x010b
+#define regHDP_XDP_D2H_RSVD_11_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_12 0x010c
+#define regHDP_XDP_D2H_RSVD_12_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_13 0x010d
+#define regHDP_XDP_D2H_RSVD_13_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_14 0x010e
+#define regHDP_XDP_D2H_RSVD_14_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_15 0x010f
+#define regHDP_XDP_D2H_RSVD_15_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_16 0x0110
+#define regHDP_XDP_D2H_RSVD_16_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_17 0x0111
+#define regHDP_XDP_D2H_RSVD_17_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_18 0x0112
+#define regHDP_XDP_D2H_RSVD_18_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_19 0x0113
+#define regHDP_XDP_D2H_RSVD_19_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_20 0x0114
+#define regHDP_XDP_D2H_RSVD_20_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_21 0x0115
+#define regHDP_XDP_D2H_RSVD_21_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_22 0x0116
+#define regHDP_XDP_D2H_RSVD_22_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_23 0x0117
+#define regHDP_XDP_D2H_RSVD_23_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_24 0x0118
+#define regHDP_XDP_D2H_RSVD_24_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_25 0x0119
+#define regHDP_XDP_D2H_RSVD_25_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_26 0x011a
+#define regHDP_XDP_D2H_RSVD_26_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_27 0x011b
+#define regHDP_XDP_D2H_RSVD_27_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_28 0x011c
+#define regHDP_XDP_D2H_RSVD_28_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_29 0x011d
+#define regHDP_XDP_D2H_RSVD_29_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_30 0x011e
+#define regHDP_XDP_D2H_RSVD_30_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_31 0x011f
+#define regHDP_XDP_D2H_RSVD_31_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_32 0x0120
+#define regHDP_XDP_D2H_RSVD_32_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_33 0x0121
+#define regHDP_XDP_D2H_RSVD_33_BASE_IDX 0
+#define regHDP_XDP_D2H_RSVD_34 0x0122
+#define regHDP_XDP_D2H_RSVD_34_BASE_IDX 0
+#define regHDP_XDP_DIRECT2HDP_LAST 0x0123
+#define regHDP_XDP_DIRECT2HDP_LAST_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR_CFG 0x0124
+#define regHDP_XDP_P2P_BAR_CFG_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_OFFSET 0x0125
+#define regHDP_XDP_P2P_MBX_OFFSET_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR0 0x0126
+#define regHDP_XDP_P2P_MBX_ADDR0_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR1 0x0127
+#define regHDP_XDP_P2P_MBX_ADDR1_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR2 0x0128
+#define regHDP_XDP_P2P_MBX_ADDR2_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR3 0x0129
+#define regHDP_XDP_P2P_MBX_ADDR3_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR4 0x012a
+#define regHDP_XDP_P2P_MBX_ADDR4_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR5 0x012b
+#define regHDP_XDP_P2P_MBX_ADDR5_BASE_IDX 0
+#define regHDP_XDP_P2P_MBX_ADDR6 0x012c
+#define regHDP_XDP_P2P_MBX_ADDR6_BASE_IDX 0
+#define regHDP_XDP_HDP_MBX_MC_CFG 0x012d
+#define regHDP_XDP_HDP_MBX_MC_CFG_BASE_IDX 0
+#define regHDP_XDP_HDP_MC_CFG 0x012e
+#define regHDP_XDP_HDP_MC_CFG_BASE_IDX 0
+#define regHDP_XDP_HST_CFG 0x012f
+#define regHDP_XDP_HST_CFG_BASE_IDX 0
+#define regHDP_XDP_HDP_IPH_CFG 0x0131
+#define regHDP_XDP_HDP_IPH_CFG_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR0 0x0134
+#define regHDP_XDP_P2P_BAR0_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR1 0x0135
+#define regHDP_XDP_P2P_BAR1_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR2 0x0136
+#define regHDP_XDP_P2P_BAR2_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR3 0x0137
+#define regHDP_XDP_P2P_BAR3_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR4 0x0138
+#define regHDP_XDP_P2P_BAR4_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR5 0x0139
+#define regHDP_XDP_P2P_BAR5_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR6 0x013a
+#define regHDP_XDP_P2P_BAR6_BASE_IDX 0
+#define regHDP_XDP_P2P_BAR7 0x013b
+#define regHDP_XDP_P2P_BAR7_BASE_IDX 0
+#define regHDP_XDP_FLUSH_ARMED_STS 0x013c
+#define regHDP_XDP_FLUSH_ARMED_STS_BASE_IDX 0
+#define regHDP_XDP_FLUSH_CNTR0_STS 0x013d
+#define regHDP_XDP_FLUSH_CNTR0_STS_BASE_IDX 0
+#define regHDP_XDP_BUSY_STS 0x013e
+#define regHDP_XDP_BUSY_STS_BASE_IDX 0
+#define regHDP_XDP_STICKY 0x013f
+#define regHDP_XDP_STICKY_BASE_IDX 0
+#define regHDP_XDP_CHKN 0x0140
+#define regHDP_XDP_CHKN_BASE_IDX 0
+#define regHDP_XDP_BARS_ADDR_39_36 0x0144
+#define regHDP_XDP_BARS_ADDR_39_36_BASE_IDX 0
+#define regHDP_XDP_MC_VM_FB_LOCATION_BASE 0x0145
+#define regHDP_XDP_MC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG 0x0148
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2 0x0149
+#define regHDP_XDP_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regHDP_XDP_MMHUB_ERROR 0x014a
+#define regHDP_XDP_MMHUB_ERROR_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h
new file mode 100644
index 000000000000..3ccd2797936e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h
@@ -0,0 +1,663 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _hdp_4_4_2_SH_MASK_HEADER
+#define _hdp_4_4_2_SH_MASK_HEADER
+
+
+// addressBlock: aid_hdp_hdpdec
+//HDP_MMHUB_TLVL
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL__SHIFT 0x0
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL__SHIFT 0x4
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL__SHIFT 0x8
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL__SHIFT 0xc
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL__SHIFT 0x10
+#define HDP_MMHUB_TLVL__HDP_WR_TLVL_MASK 0x0000000FL
+#define HDP_MMHUB_TLVL__HDP_RD_TLVL_MASK 0x000000F0L
+#define HDP_MMHUB_TLVL__XDP_WR_TLVL_MASK 0x00000F00L
+#define HDP_MMHUB_TLVL__XDP_RD_TLVL_MASK 0x0000F000L
+#define HDP_MMHUB_TLVL__XDP_MBX_WR_TLVL_MASK 0x000F0000L
+//HDP_MMHUB_UNITID
+#define HDP_MMHUB_UNITID__HDP_UNITID__SHIFT 0x0
+#define HDP_MMHUB_UNITID__XDP_UNITID__SHIFT 0x8
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID__SHIFT 0x10
+#define HDP_MMHUB_UNITID__HDP_UNITID_MASK 0x0000003FL
+#define HDP_MMHUB_UNITID__XDP_UNITID_MASK 0x00003F00L
+#define HDP_MMHUB_UNITID__XDP_MBX_UNITID_MASK 0x003F0000L
+//HDP_NONSURFACE_BASE
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8__SHIFT 0x0
+#define HDP_NONSURFACE_BASE__NONSURF_BASE_39_8_MASK 0xFFFFFFFFL
+//HDP_NONSURFACE_INFO
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP__SHIFT 0x4
+#define HDP_NONSURFACE_INFO__NONSURF_VMID__SHIFT 0x8
+#define HDP_NONSURFACE_INFO__NONSURF_SWAP_MASK 0x00000030L
+#define HDP_NONSURFACE_INFO__NONSURF_VMID_MASK 0x00000F00L
+//HDP_NONSURFACE_BASE_HI
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40__SHIFT 0x0
+#define HDP_NONSURFACE_BASE_HI__NONSURF_BASE_47_40_MASK 0x000000FFL
+//HDP_SURFACE_WRITE_FLAGS
+#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG__SHIFT 0x0
+#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG__SHIFT 0x1
+#define HDP_SURFACE_WRITE_FLAGS__SURF0_WRITE_FLAG_MASK 0x00000001L
+#define HDP_SURFACE_WRITE_FLAGS__SURF1_WRITE_FLAG_MASK 0x00000002L
+//HDP_SURFACE_READ_FLAGS
+#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG__SHIFT 0x0
+#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG__SHIFT 0x1
+#define HDP_SURFACE_READ_FLAGS__SURF0_READ_FLAG_MASK 0x00000001L
+#define HDP_SURFACE_READ_FLAGS__SURF1_READ_FLAG_MASK 0x00000002L
+//HDP_SURFACE_WRITE_FLAGS_CLR
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR__SHIFT 0x1
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF0_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_SURFACE_WRITE_FLAGS_CLR__SURF1_WRITE_FLAG_CLR_MASK 0x00000002L
+//HDP_SURFACE_READ_FLAGS_CLR
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR__SHIFT 0x0
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF0_READ_FLAG_CLR_MASK 0x00000001L
+#define HDP_SURFACE_READ_FLAGS_CLR__SURF1_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_NONSURF_FLAGS
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG__SHIFT 0x0
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG__SHIFT 0x1
+#define HDP_NONSURF_FLAGS__NONSURF_WRITE_FLAG_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS__NONSURF_READ_FLAG_MASK 0x00000002L
+//HDP_NONSURF_FLAGS_CLR
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR__SHIFT 0x0
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR__SHIFT 0x1
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_WRITE_FLAG_CLR_MASK 0x00000001L
+#define HDP_NONSURF_FLAGS_CLR__NONSURF_READ_FLAG_CLR_MASK 0x00000002L
+//HDP_HOST_PATH_CNTL
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER__SHIFT 0x9
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER__SHIFT 0xb
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x12
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER__SHIFT 0x13
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN__SHIFT 0x15
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN__SHIFT 0x16
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS__SHIFT 0x1d
+#define HDP_HOST_PATH_CNTL__WR_STALL_TIMER_MASK 0x00000600L
+#define HDP_HOST_PATH_CNTL__RD_STALL_TIMER_MASK 0x00001800L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00040000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_TIMER_MASK 0x00180000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_EN_MASK 0x00200000L
+#define HDP_HOST_PATH_CNTL__WRITE_COMBINE_64B_EN_MASK 0x00400000L
+#define HDP_HOST_PATH_CNTL__ALL_SURFACES_DIS_MASK 0x20000000L
+//HDP_SW_SEMAPHORE
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE__SHIFT 0x0
+#define HDP_SW_SEMAPHORE__SW_SEMAPHORE_MASK 0xFFFFFFFFL
+//HDP_DEBUG0
+#define HDP_DEBUG0__HDP_DEBUG__SHIFT 0x0
+#define HDP_DEBUG0__HDP_DEBUG_MASK 0xFFFFFFFFL
+//HDP_LAST_SURFACE_HIT
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT__SHIFT 0x0
+#define HDP_LAST_SURFACE_HIT__LAST_SURFACE_HIT_MASK 0x00000003L
+//HDP_OUTSTANDING_REQ
+#define HDP_OUTSTANDING_REQ__WRITE_REQ__SHIFT 0x0
+#define HDP_OUTSTANDING_REQ__READ_REQ__SHIFT 0x8
+#define HDP_OUTSTANDING_REQ__WRITE_REQ_MASK 0x000000FFL
+#define HDP_OUTSTANDING_REQ__READ_REQ_MASK 0x0000FF00L
+//HDP_MISC_CNTL
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL__SHIFT 0x2
+#define HDP_MISC_CNTL__ATOMIC_BUFFER_PROTECT_ENABLE__SHIFT 0x4
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024__SHIFT 0x5
+#define HDP_MISC_CNTL__RAW_ADDR_CAM_ENABLE__SHIFT 0x7
+#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE__SHIFT 0x8
+#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE__SHIFT 0x9
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES__SHIFT 0xb
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY__SHIFT 0xc
+#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK__SHIFT 0xe
+#define HDP_MISC_CNTL__SRAM_ECC_ENABLE__SHIFT 0x14
+#define HDP_MISC_CNTL__FED_ENABLE__SHIFT 0x15
+#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE__SHIFT 0x16
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE__SHIFT 0x18
+#define HDP_MISC_CNTL__HDP_MMHUB_PENDING_WR_TAG_CHECK__SHIFT 0x1a
+#define HDP_MISC_CNTL__XDP_MMHUB_PENDING_WR_TAG_CHECK__SHIFT 0x1b
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE__SHIFT 0x1e
+#define HDP_MISC_CNTL__IDLE_HYSTERESIS_CNTL_MASK 0x0000000CL
+#define HDP_MISC_CNTL__ATOMIC_BUFFER_PROTECT_ENABLE_MASK 0x00000010L
+#define HDP_MISC_CNTL__OUTSTANDING_WRITE_COUNT_1024_MASK 0x00000020L
+#define HDP_MISC_CNTL__RAW_ADDR_CAM_ENABLE_MASK 0x00000080L
+#define HDP_MISC_CNTL__MMHUB_EARLY_WRACK_ENABLE_MASK 0x00000100L
+#define HDP_MISC_CNTL__EARLY_WRACK_MISSING_PROTECT_ENABLE_MASK 0x00000200L
+#define HDP_MISC_CNTL__SIMULTANEOUS_READS_WRITES_MASK 0x00000800L
+#define HDP_MISC_CNTL__SYSHUB_CHANNEL_PRIORITY_MASK 0x00003000L
+#define HDP_MISC_CNTL__READ_BUFFER_WATERMARK_MASK 0x0000C000L
+#define HDP_MISC_CNTL__SRAM_ECC_ENABLE_MASK 0x00100000L
+#define HDP_MISC_CNTL__FED_ENABLE_MASK 0x00200000L
+#define HDP_MISC_CNTL__ATOMIC_FED_ENABLE_MASK 0x00400000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_ENABLE_MASK 0x01000000L
+#define HDP_MISC_CNTL__HDP_MMHUB_PENDING_WR_TAG_CHECK_MASK 0x04000000L
+#define HDP_MISC_CNTL__XDP_MMHUB_PENDING_WR_TAG_CHECK_MASK 0x08000000L
+#define HDP_MISC_CNTL__MMHUB_WRBURST_SIZE_MASK 0x40000000L
+//HDP_MEM_POWER_CTRL
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN__SHIFT 0x1
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN__SHIFT 0x2
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN__SHIFT 0x3
+#define HDP_MEM_POWER_CTRL__IPH_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0xe
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN__SHIFT 0x10
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN__SHIFT 0x11
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN__SHIFT 0x12
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN__SHIFT 0x13
+#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS__SHIFT 0x14
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_LS_ENTER_DELAY__SHIFT 0x1e
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK 0x00000004L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK 0x00000008L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0x0000C000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DS_EN_MASK 0x00040000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_SD_EN_MASK 0x00080000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_IDLE_HYSTERESIS_MASK 0x00700000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_DOWN_LS_ENTER_DELAY_MASK 0xC0000000L
+//HDP_MMHUB_CNTL
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO__SHIFT 0x0
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC__SHIFT 0x1
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP__SHIFT 0x2
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE__SHIFT 0x4
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE__SHIFT 0x5
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE__SHIFT 0x6
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_MASK 0x00000001L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_MASK 0x00000002L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_MASK 0x00000004L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_RO_OVERRIDE_MASK 0x00000010L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_GCC_OVERRIDE_MASK 0x00000020L
+#define HDP_MMHUB_CNTL__HDP_MMHUB_SNOOP_OVERRIDE_MASK 0x00000040L
+//HDP_EDC_CNT
+#define HDP_EDC_CNT__MEM0_SED_COUNT__SHIFT 0x0
+#define HDP_EDC_CNT__MEM0_SED_COUNT_MASK 0x00000003L
+//HDP_VERSION
+#define HDP_VERSION__MINVER__SHIFT 0x0
+#define HDP_VERSION__MAJVER__SHIFT 0x8
+#define HDP_VERSION__REV__SHIFT 0x10
+#define HDP_VERSION__MINVER_MASK 0x000000FFL
+#define HDP_VERSION__MAJVER_MASK 0x0000FF00L
+#define HDP_VERSION__REV_MASK 0x00FF0000L
+//HDP_CLK_CNTL
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT__SHIFT 0x0
+#define HDP_CLK_CNTL__REG_WAKE_DYN_CLK__SHIFT 0x4
+#define HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define HDP_CLK_CNTL__REG_CLK_ENABLE_COUNT_MASK 0x0000000FL
+#define HDP_CLK_CNTL__REG_WAKE_DYN_CLK_MASK 0x00000010L
+#define HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//HDP_MEMIO_CNTL
+#define HDP_MEMIO_CNTL__MEMIO_SEND__SHIFT 0x0
+#define HDP_MEMIO_CNTL__MEMIO_OP__SHIFT 0x1
+#define HDP_MEMIO_CNTL__MEMIO_BE__SHIFT 0x2
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE__SHIFT 0x6
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE__SHIFT 0x7
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER__SHIFT 0x8
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR__SHIFT 0xe
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR__SHIFT 0xf
+#define HDP_MEMIO_CNTL__MEMIO_VF__SHIFT 0x10
+#define HDP_MEMIO_CNTL__MEMIO_VFID__SHIFT 0x11
+#define HDP_MEMIO_CNTL__MEMIO_SEND_MASK 0x00000001L
+#define HDP_MEMIO_CNTL__MEMIO_OP_MASK 0x00000002L
+#define HDP_MEMIO_CNTL__MEMIO_BE_MASK 0x0000003CL
+#define HDP_MEMIO_CNTL__MEMIO_WR_STROBE_MASK 0x00000040L
+#define HDP_MEMIO_CNTL__MEMIO_RD_STROBE_MASK 0x00000080L
+#define HDP_MEMIO_CNTL__MEMIO_ADDR_UPPER_MASK 0x00003F00L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_WR_ERROR_MASK 0x00004000L
+#define HDP_MEMIO_CNTL__MEMIO_CLR_RD_ERROR_MASK 0x00008000L
+#define HDP_MEMIO_CNTL__MEMIO_VF_MASK 0x00010000L
+#define HDP_MEMIO_CNTL__MEMIO_VFID_MASK 0x003E0000L
+//HDP_MEMIO_ADDR
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER__SHIFT 0x0
+#define HDP_MEMIO_ADDR__MEMIO_ADDR_LOWER_MASK 0xFFFFFFFFL
+//HDP_MEMIO_STATUS
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS__SHIFT 0x0
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS__SHIFT 0x1
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR__SHIFT 0x2
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR__SHIFT 0x3
+#define HDP_MEMIO_STATUS__MEMIO_WR_STATUS_MASK 0x00000001L
+#define HDP_MEMIO_STATUS__MEMIO_RD_STATUS_MASK 0x00000002L
+#define HDP_MEMIO_STATUS__MEMIO_WR_ERROR_MASK 0x00000004L
+#define HDP_MEMIO_STATUS__MEMIO_RD_ERROR_MASK 0x00000008L
+//HDP_MEMIO_WR_DATA
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA__SHIFT 0x0
+#define HDP_MEMIO_WR_DATA__MEMIO_WR_DATA_MASK 0xFFFFFFFFL
+//HDP_MEMIO_RD_DATA
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA__SHIFT 0x0
+#define HDP_MEMIO_RD_DATA__MEMIO_RD_DATA_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_FIRST
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_FIRST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_FLUSH
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM__SHIFT 0x0
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA__SHIFT 0x4
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL__SHIFT 0x8
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG__SHIFT 0xb
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST__SHIFT 0x10
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM__SHIFT 0x12
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0__SHIFT 0x13
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1__SHIFT 0x14
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_FLUSH_NUM_MASK 0x0000000FL
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ENC_DATA_MASK 0x000000F0L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_MBX_ADDR_SEL_MASK 0x00000700L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_XPB_CLG_MASK 0x0000F800L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_SEND_HOST_MASK 0x00010000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_ALTER_FLUSH_NUM_MASK 0x00040000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_0_MASK 0x00080000L
+#define HDP_XDP_D2H_FLUSH__D2H_FLUSH_RSVD_1_MASK 0x00100000L
+//HDP_XDP_D2H_BAR_UPDATE
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR__SHIFT 0x0
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM__SHIFT 0x10
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM__SHIFT 0x14
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_FLUSH_NUM_MASK 0x000F0000L
+#define HDP_XDP_D2H_BAR_UPDATE__D2H_BAR_UPDATE_BAR_NUM_MASK 0x00700000L
+//HDP_XDP_D2H_RSVD_3
+#define HDP_XDP_D2H_RSVD_3__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_3__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_4
+#define HDP_XDP_D2H_RSVD_4__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_4__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_5
+#define HDP_XDP_D2H_RSVD_5__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_5__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_6
+#define HDP_XDP_D2H_RSVD_6__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_6__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_7
+#define HDP_XDP_D2H_RSVD_7__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_7__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_8
+#define HDP_XDP_D2H_RSVD_8__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_8__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_9
+#define HDP_XDP_D2H_RSVD_9__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_9__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_10
+#define HDP_XDP_D2H_RSVD_10__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_10__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_11
+#define HDP_XDP_D2H_RSVD_11__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_11__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_12
+#define HDP_XDP_D2H_RSVD_12__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_12__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_13
+#define HDP_XDP_D2H_RSVD_13__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_13__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_14
+#define HDP_XDP_D2H_RSVD_14__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_14__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_15
+#define HDP_XDP_D2H_RSVD_15__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_15__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_16
+#define HDP_XDP_D2H_RSVD_16__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_16__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_17
+#define HDP_XDP_D2H_RSVD_17__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_17__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_18
+#define HDP_XDP_D2H_RSVD_18__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_18__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_19
+#define HDP_XDP_D2H_RSVD_19__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_19__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_20
+#define HDP_XDP_D2H_RSVD_20__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_20__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_21
+#define HDP_XDP_D2H_RSVD_21__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_21__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_22
+#define HDP_XDP_D2H_RSVD_22__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_22__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_23
+#define HDP_XDP_D2H_RSVD_23__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_23__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_24
+#define HDP_XDP_D2H_RSVD_24__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_24__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_25
+#define HDP_XDP_D2H_RSVD_25__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_25__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_26
+#define HDP_XDP_D2H_RSVD_26__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_26__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_27
+#define HDP_XDP_D2H_RSVD_27__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_27__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_28
+#define HDP_XDP_D2H_RSVD_28__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_28__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_29
+#define HDP_XDP_D2H_RSVD_29__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_29__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_30
+#define HDP_XDP_D2H_RSVD_30__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_30__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_31
+#define HDP_XDP_D2H_RSVD_31__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_31__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_32
+#define HDP_XDP_D2H_RSVD_32__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_32__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_33
+#define HDP_XDP_D2H_RSVD_33__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_33__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_D2H_RSVD_34
+#define HDP_XDP_D2H_RSVD_34__RESERVED__SHIFT 0x0
+#define HDP_XDP_D2H_RSVD_34__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_DIRECT2HDP_LAST
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED__SHIFT 0x0
+#define HDP_XDP_DIRECT2HDP_LAST__RESERVED_MASK 0xFFFFFFFFL
+//HDP_XDP_P2P_BAR_CFG
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE__SHIFT 0x0
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM__SHIFT 0x4
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_ADDR_SIZE_MASK 0x0000000FL
+#define HDP_XDP_P2P_BAR_CFG__P2P_BAR_CFG_BAR_FROM_MASK 0x00000030L
+//HDP_XDP_P2P_MBX_OFFSET
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_OFFSET__P2P_MBX_OFFSET_MASK 0x0001FFFFL
+//HDP_XDP_P2P_MBX_ADDR0
+#define HDP_XDP_P2P_MBX_ADDR0__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR0__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR0__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR1
+#define HDP_XDP_P2P_MBX_ADDR1__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR1__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR1__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR2
+#define HDP_XDP_P2P_MBX_ADDR2__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR2__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR2__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR3
+#define HDP_XDP_P2P_MBX_ADDR3__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR3__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR3__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR4
+#define HDP_XDP_P2P_MBX_ADDR4__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR4__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR4__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR5
+#define HDP_XDP_P2P_MBX_ADDR5__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR5__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR5__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_P2P_MBX_ADDR6
+#define HDP_XDP_P2P_MBX_ADDR6__VALID__SHIFT 0x0
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19__SHIFT 0x3
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40__SHIFT 0x18
+#define HDP_XDP_P2P_MBX_ADDR6__VALID_MASK 0x00000001L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_35_19_MASK 0x000FFFF8L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_P2P_MBX_ADDR6__ADDR_47_40_MASK 0xFF000000L
+//HDP_XDP_HDP_MBX_MC_CFG
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS__SHIFT 0x0
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP__SHIFT 0xe
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_QOS_MASK 0x0000000FL
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MBX_MC_CFG__HDP_MBX_MC_CFG_TAP_WRREQ_SNOOP_MASK 0x00004000L
+//HDP_XDP_HDP_MC_CFG
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE__SHIFT 0x0
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE__SHIFT 0x1
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE__SHIFT 0x2
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP__SHIFT 0x3
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP__SHIFT 0x4
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID__SHIFT 0x8
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO__SHIFT 0xc
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC__SHIFT 0xd
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH__SHIFT 0xe
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_OVERRIDE_MASK 0x00000001L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_OVERRIDE_MASK 0x00000002L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_OVERRIDE_MASK 0x00000004L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SNOOP_MASK 0x00000008L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_SWAP_MASK 0x00000030L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_VMID_MASK 0x00000F00L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_RO_MASK 0x00001000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_HST_TAP_REQ_GCC_MASK 0x00002000L
+#define HDP_XDP_HDP_MC_CFG__HDP_MC_CFG_XDP_HIGHER_PRI_THRESH_MASK 0x000FC000L
+//HDP_XDP_HST_CFG
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN__SHIFT 0x0
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER__SHIFT 0x1
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN__SHIFT 0x3
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN__SHIFT 0x4
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG__SHIFT 0x5
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_EN_MASK 0x00000001L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_MASK 0x00000006L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_BURST_EN_MASK 0x00000008L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_64B_EN_MASK 0x00000010L
+#define HDP_XDP_HST_CFG__HST_CFG_WR_COMBINE_TIMER_PRELOAD_CFG_MASK 0x00000020L
+//HDP_XDP_HDP_IPH_CFG
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE__SHIFT 0x0
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE__SHIFT 0x6
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING__SHIFT 0xc
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN__SHIFT 0xd
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_SYS_FIFO_DEPTH_OVERRIDE_MASK 0x0000003FL
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_XDP_FIFO_DEPTH_OVERRIDE_MASK 0x00000FC0L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_INVERSE_PEER_TAG_MATCHING_MASK 0x00001000L
+#define HDP_XDP_HDP_IPH_CFG__HDP_IPH_CFG_P2P_RD_EN_MASK 0x00002000L
+//HDP_XDP_P2P_BAR0
+#define HDP_XDP_P2P_BAR0__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR0__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR0__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR0__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR0__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR0__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR1
+#define HDP_XDP_P2P_BAR1__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR1__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR1__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR1__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR1__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR1__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR2
+#define HDP_XDP_P2P_BAR2__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR2__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR2__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR2__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR2__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR2__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR3
+#define HDP_XDP_P2P_BAR3__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR3__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR3__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR3__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR3__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR3__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR4
+#define HDP_XDP_P2P_BAR4__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR4__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR4__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR4__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR4__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR4__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR5
+#define HDP_XDP_P2P_BAR5__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR5__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR5__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR5__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR5__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR5__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR6
+#define HDP_XDP_P2P_BAR6__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR6__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR6__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR6__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR6__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR6__VALID_MASK 0x00100000L
+//HDP_XDP_P2P_BAR7
+#define HDP_XDP_P2P_BAR7__ADDR__SHIFT 0x0
+#define HDP_XDP_P2P_BAR7__FLUSH__SHIFT 0x10
+#define HDP_XDP_P2P_BAR7__VALID__SHIFT 0x14
+#define HDP_XDP_P2P_BAR7__ADDR_MASK 0x0000FFFFL
+#define HDP_XDP_P2P_BAR7__FLUSH_MASK 0x000F0000L
+#define HDP_XDP_P2P_BAR7__VALID_MASK 0x00100000L
+//HDP_XDP_FLUSH_ARMED_STS
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_ARMED_STS__FLUSH_ARMED_STS_MASK 0xFFFFFFFFL
+//HDP_XDP_FLUSH_CNTR0_STS
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS__SHIFT 0x0
+#define HDP_XDP_FLUSH_CNTR0_STS__FLUSH_CNTR0_STS_MASK 0x03FFFFFFL
+//HDP_XDP_BUSY_STS
+#define HDP_XDP_BUSY_STS__BUSY_BITS__SHIFT 0x0
+#define HDP_XDP_BUSY_STS__BUSY_BITS_MASK 0x00FFFFFFL
+//HDP_XDP_STICKY
+#define HDP_XDP_STICKY__STICKY_STS__SHIFT 0x0
+#define HDP_XDP_STICKY__STICKY_W1C__SHIFT 0x10
+#define HDP_XDP_STICKY__STICKY_STS_MASK 0x0000FFFFL
+#define HDP_XDP_STICKY__STICKY_W1C_MASK 0xFFFF0000L
+//HDP_XDP_CHKN
+#define HDP_XDP_CHKN__CHKN_0_RSVD__SHIFT 0x0
+#define HDP_XDP_CHKN__CHKN_1_RSVD__SHIFT 0x8
+#define HDP_XDP_CHKN__CHKN_2_RSVD__SHIFT 0x10
+#define HDP_XDP_CHKN__CHKN_3_RSVD__SHIFT 0x18
+#define HDP_XDP_CHKN__CHKN_0_RSVD_MASK 0x000000FFL
+#define HDP_XDP_CHKN__CHKN_1_RSVD_MASK 0x0000FF00L
+#define HDP_XDP_CHKN__CHKN_2_RSVD_MASK 0x00FF0000L
+#define HDP_XDP_CHKN__CHKN_3_RSVD_MASK 0xFF000000L
+//HDP_XDP_BARS_ADDR_39_36
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36__SHIFT 0x0
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36__SHIFT 0x4
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36__SHIFT 0x8
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36__SHIFT 0xc
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36__SHIFT 0x10
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36__SHIFT 0x14
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36__SHIFT 0x18
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36__SHIFT 0x1c
+#define HDP_XDP_BARS_ADDR_39_36__BAR0_ADDR_39_36_MASK 0x0000000FL
+#define HDP_XDP_BARS_ADDR_39_36__BAR1_ADDR_39_36_MASK 0x000000F0L
+#define HDP_XDP_BARS_ADDR_39_36__BAR2_ADDR_39_36_MASK 0x00000F00L
+#define HDP_XDP_BARS_ADDR_39_36__BAR3_ADDR_39_36_MASK 0x0000F000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR4_ADDR_39_36_MASK 0x000F0000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR5_ADDR_39_36_MASK 0x00F00000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR6_ADDR_39_36_MASK 0x0F000000L
+#define HDP_XDP_BARS_ADDR_39_36__BAR7_ADDR_39_36_MASK 0xF0000000L
+//HDP_XDP_MC_VM_FB_LOCATION_BASE
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define HDP_XDP_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x03FFFFFFL
+//HDP_XDP_GPU_IOV_VIOLATION_LOG
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x12
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00040000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L
+//HDP_XDP_GPU_IOV_VIOLATION_LOG2
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define HDP_XDP_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//HDP_XDP_MMHUB_ERROR
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01__SHIFT 0x1
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10__SHIFT 0x2
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11__SHIFT 0x3
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_FED__SHIFT 0x4
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01__SHIFT 0x5
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10__SHIFT 0x6
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11__SHIFT 0x7
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01__SHIFT 0x9
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10__SHIFT 0xa
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11__SHIFT 0xb
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_FED__SHIFT 0xc
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01__SHIFT 0xd
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10__SHIFT 0xe
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11__SHIFT 0xf
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01__SHIFT 0x11
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10__SHIFT 0x12
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11__SHIFT 0x13
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01__SHIFT 0x15
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10__SHIFT 0x16
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11__SHIFT 0x17
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_01_MASK 0x00000002L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_10_MASK 0x00000004L
+#define HDP_XDP_MMHUB_ERROR__HDP_BRESP_11_MASK 0x00000008L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_FED_MASK 0x00000010L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_01_MASK 0x00000020L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_10_MASK 0x00000040L
+#define HDP_XDP_MMHUB_ERROR__HDP_BUSER_NACK_11_MASK 0x00000080L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_01_MASK 0x00000200L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_10_MASK 0x00000400L
+#define HDP_XDP_MMHUB_ERROR__HDP_RRESP_11_MASK 0x00000800L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_FED_MASK 0x00001000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_01_MASK 0x00002000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_10_MASK 0x00004000L
+#define HDP_XDP_MMHUB_ERROR__HDP_RUSER_NACK_11_MASK 0x00008000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_01_MASK 0x00020000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_10_MASK 0x00040000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BRESP_11_MASK 0x00080000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_01_MASK 0x00200000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_10_MASK 0x00400000L
+#define HDP_XDP_MMHUB_ERROR__XDP_BUSER_NACK_11_MASK 0x00800000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h
new file mode 100644
index 000000000000..f04fa95a770c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mp_13_0_6_OFFSET_HEADER
+#define _mp_13_0_6_OFFSET_HEADER
+
+
+
+// addressBlock: aid_mp_SmuMp0_SmnDec
+// base address: 0x0
+#define regMP0_SMN_C2PMSG_32 0x0060
+#define regMP0_SMN_C2PMSG_32_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_33 0x0061
+#define regMP0_SMN_C2PMSG_33_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_34 0x0062
+#define regMP0_SMN_C2PMSG_34_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_35 0x0063
+#define regMP0_SMN_C2PMSG_35_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_36 0x0064
+#define regMP0_SMN_C2PMSG_36_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_37 0x0065
+#define regMP0_SMN_C2PMSG_37_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_38 0x0066
+#define regMP0_SMN_C2PMSG_38_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_39 0x0067
+#define regMP0_SMN_C2PMSG_39_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_40 0x0068
+#define regMP0_SMN_C2PMSG_40_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_41 0x0069
+#define regMP0_SMN_C2PMSG_41_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_42 0x006a
+#define regMP0_SMN_C2PMSG_42_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_43 0x006b
+#define regMP0_SMN_C2PMSG_43_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_44 0x006c
+#define regMP0_SMN_C2PMSG_44_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_45 0x006d
+#define regMP0_SMN_C2PMSG_45_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_46 0x006e
+#define regMP0_SMN_C2PMSG_46_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_47 0x006f
+#define regMP0_SMN_C2PMSG_47_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_48 0x0070
+#define regMP0_SMN_C2PMSG_48_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_49 0x0071
+#define regMP0_SMN_C2PMSG_49_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_50 0x0072
+#define regMP0_SMN_C2PMSG_50_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_51 0x0073
+#define regMP0_SMN_C2PMSG_51_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_52 0x0074
+#define regMP0_SMN_C2PMSG_52_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_53 0x0075
+#define regMP0_SMN_C2PMSG_53_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_54 0x0076
+#define regMP0_SMN_C2PMSG_54_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_55 0x0077
+#define regMP0_SMN_C2PMSG_55_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_56 0x0078
+#define regMP0_SMN_C2PMSG_56_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_57 0x0079
+#define regMP0_SMN_C2PMSG_57_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_58 0x007a
+#define regMP0_SMN_C2PMSG_58_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_59 0x007b
+#define regMP0_SMN_C2PMSG_59_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_60 0x007c
+#define regMP0_SMN_C2PMSG_60_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_61 0x007d
+#define regMP0_SMN_C2PMSG_61_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_62 0x007e
+#define regMP0_SMN_C2PMSG_62_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_63 0x007f
+#define regMP0_SMN_C2PMSG_63_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_64 0x0080
+#define regMP0_SMN_C2PMSG_64_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_65 0x0081
+#define regMP0_SMN_C2PMSG_65_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_66 0x0082
+#define regMP0_SMN_C2PMSG_66_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_67 0x0083
+#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_68 0x0084
+#define regMP0_SMN_C2PMSG_68_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_69 0x0085
+#define regMP0_SMN_C2PMSG_69_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_70 0x0086
+#define regMP0_SMN_C2PMSG_70_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_71 0x0087
+#define regMP0_SMN_C2PMSG_71_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_72 0x0088
+#define regMP0_SMN_C2PMSG_72_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_73 0x0089
+#define regMP0_SMN_C2PMSG_73_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_74 0x008a
+#define regMP0_SMN_C2PMSG_74_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_75 0x008b
+#define regMP0_SMN_C2PMSG_75_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_76 0x008c
+#define regMP0_SMN_C2PMSG_76_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_77 0x008d
+#define regMP0_SMN_C2PMSG_77_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_78 0x008e
+#define regMP0_SMN_C2PMSG_78_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_79 0x008f
+#define regMP0_SMN_C2PMSG_79_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_80 0x0090
+#define regMP0_SMN_C2PMSG_80_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_81 0x0091
+#define regMP0_SMN_C2PMSG_81_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_82 0x0092
+#define regMP0_SMN_C2PMSG_82_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_83 0x0093
+#define regMP0_SMN_C2PMSG_83_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_84 0x0094
+#define regMP0_SMN_C2PMSG_84_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_85 0x0095
+#define regMP0_SMN_C2PMSG_85_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_86 0x0096
+#define regMP0_SMN_C2PMSG_86_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_87 0x0097
+#define regMP0_SMN_C2PMSG_87_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_88 0x0098
+#define regMP0_SMN_C2PMSG_88_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_89 0x0099
+#define regMP0_SMN_C2PMSG_89_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_90 0x009a
+#define regMP0_SMN_C2PMSG_90_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_91 0x009b
+#define regMP0_SMN_C2PMSG_91_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_92 0x009c
+#define regMP0_SMN_C2PMSG_92_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_93 0x009d
+#define regMP0_SMN_C2PMSG_93_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_94 0x009e
+#define regMP0_SMN_C2PMSG_94_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_95 0x009f
+#define regMP0_SMN_C2PMSG_95_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_96 0x00a0
+#define regMP0_SMN_C2PMSG_96_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_97 0x00a1
+#define regMP0_SMN_C2PMSG_97_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_98 0x00a2
+#define regMP0_SMN_C2PMSG_98_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_99 0x00a3
+#define regMP0_SMN_C2PMSG_99_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_100 0x00a4
+#define regMP0_SMN_C2PMSG_100_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_101 0x00a5
+#define regMP0_SMN_C2PMSG_101_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_102 0x00a6
+#define regMP0_SMN_C2PMSG_102_BASE_IDX 0
+#define regMP0_SMN_C2PMSG_103 0x00a7
+#define regMP0_SMN_C2PMSG_103_BASE_IDX 0
+#define regMP0_SMN_IH_CREDIT 0x00c1
+#define regMP0_SMN_IH_CREDIT_BASE_IDX 0
+#define regMP0_SMN_IH_SW_INT 0x00c2
+#define regMP0_SMN_IH_SW_INT_BASE_IDX 0
+#define regMP0_SMN_IH_SW_INT_CTRL 0x00c3
+#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+
+
+// addressBlock: aid_mp_SmuMp1_SmnDec
+// base address: 0x0
+#define regMP1_SMN_C2PMSG_32 0x0260
+#define regMP1_SMN_C2PMSG_32_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_33 0x0261
+#define regMP1_SMN_C2PMSG_33_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_34 0x0262
+#define regMP1_SMN_C2PMSG_34_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_35 0x0263
+#define regMP1_SMN_C2PMSG_35_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_36 0x0264
+#define regMP1_SMN_C2PMSG_36_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_37 0x0265
+#define regMP1_SMN_C2PMSG_37_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_38 0x0266
+#define regMP1_SMN_C2PMSG_38_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_39 0x0267
+#define regMP1_SMN_C2PMSG_39_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_40 0x0268
+#define regMP1_SMN_C2PMSG_40_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_41 0x0269
+#define regMP1_SMN_C2PMSG_41_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_42 0x026a
+#define regMP1_SMN_C2PMSG_42_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_43 0x026b
+#define regMP1_SMN_C2PMSG_43_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_44 0x026c
+#define regMP1_SMN_C2PMSG_44_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_45 0x026d
+#define regMP1_SMN_C2PMSG_45_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_46 0x026e
+#define regMP1_SMN_C2PMSG_46_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_47 0x026f
+#define regMP1_SMN_C2PMSG_47_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_48 0x0270
+#define regMP1_SMN_C2PMSG_48_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_49 0x0271
+#define regMP1_SMN_C2PMSG_49_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_50 0x0272
+#define regMP1_SMN_C2PMSG_50_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_51 0x0273
+#define regMP1_SMN_C2PMSG_51_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_52 0x0274
+#define regMP1_SMN_C2PMSG_52_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_53 0x0275
+#define regMP1_SMN_C2PMSG_53_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_54 0x0276
+#define regMP1_SMN_C2PMSG_54_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_55 0x0277
+#define regMP1_SMN_C2PMSG_55_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_56 0x0278
+#define regMP1_SMN_C2PMSG_56_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_57 0x0279
+#define regMP1_SMN_C2PMSG_57_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_58 0x027a
+#define regMP1_SMN_C2PMSG_58_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_59 0x027b
+#define regMP1_SMN_C2PMSG_59_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_60 0x027c
+#define regMP1_SMN_C2PMSG_60_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_61 0x027d
+#define regMP1_SMN_C2PMSG_61_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_62 0x027e
+#define regMP1_SMN_C2PMSG_62_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_63 0x027f
+#define regMP1_SMN_C2PMSG_63_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_64 0x0280
+#define regMP1_SMN_C2PMSG_64_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_65 0x0281
+#define regMP1_SMN_C2PMSG_65_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_66 0x0282
+#define regMP1_SMN_C2PMSG_66_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_67 0x0283
+#define regMP1_SMN_C2PMSG_67_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_68 0x0284
+#define regMP1_SMN_C2PMSG_68_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_69 0x0285
+#define regMP1_SMN_C2PMSG_69_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_70 0x0286
+#define regMP1_SMN_C2PMSG_70_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_71 0x0287
+#define regMP1_SMN_C2PMSG_71_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_72 0x0288
+#define regMP1_SMN_C2PMSG_72_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_73 0x0289
+#define regMP1_SMN_C2PMSG_73_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_74 0x028a
+#define regMP1_SMN_C2PMSG_74_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_75 0x028b
+#define regMP1_SMN_C2PMSG_75_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_76 0x028c
+#define regMP1_SMN_C2PMSG_76_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_77 0x028d
+#define regMP1_SMN_C2PMSG_77_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_78 0x028e
+#define regMP1_SMN_C2PMSG_78_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_79 0x028f
+#define regMP1_SMN_C2PMSG_79_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_80 0x0290
+#define regMP1_SMN_C2PMSG_80_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_81 0x0291
+#define regMP1_SMN_C2PMSG_81_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_82 0x0292
+#define regMP1_SMN_C2PMSG_82_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_83 0x0293
+#define regMP1_SMN_C2PMSG_83_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_84 0x0294
+#define regMP1_SMN_C2PMSG_84_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_85 0x0295
+#define regMP1_SMN_C2PMSG_85_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_86 0x0296
+#define regMP1_SMN_C2PMSG_86_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_87 0x0297
+#define regMP1_SMN_C2PMSG_87_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_88 0x0298
+#define regMP1_SMN_C2PMSG_88_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_89 0x0299
+#define regMP1_SMN_C2PMSG_89_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_90 0x029a
+#define regMP1_SMN_C2PMSG_90_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_91 0x029b
+#define regMP1_SMN_C2PMSG_91_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_92 0x029c
+#define regMP1_SMN_C2PMSG_92_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_93 0x029d
+#define regMP1_SMN_C2PMSG_93_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_94 0x029e
+#define regMP1_SMN_C2PMSG_94_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_95 0x029f
+#define regMP1_SMN_C2PMSG_95_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_96 0x02a0
+#define regMP1_SMN_C2PMSG_96_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_97 0x02a1
+#define regMP1_SMN_C2PMSG_97_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_98 0x02a2
+#define regMP1_SMN_C2PMSG_98_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_99 0x02a3
+#define regMP1_SMN_C2PMSG_99_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_100 0x02a4
+#define regMP1_SMN_C2PMSG_100_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_101 0x02a5
+#define regMP1_SMN_C2PMSG_101_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_102 0x02a6
+#define regMP1_SMN_C2PMSG_102_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_103 0x02a7
+#define regMP1_SMN_C2PMSG_103_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_104 0x02a8
+#define regMP1_SMN_C2PMSG_104_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_105 0x02a9
+#define regMP1_SMN_C2PMSG_105_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_106 0x02aa
+#define regMP1_SMN_C2PMSG_106_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_107 0x02ab
+#define regMP1_SMN_C2PMSG_107_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_108 0x02ac
+#define regMP1_SMN_C2PMSG_108_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_109 0x02ad
+#define regMP1_SMN_C2PMSG_109_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_110 0x02ae
+#define regMP1_SMN_C2PMSG_110_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_111 0x02af
+#define regMP1_SMN_C2PMSG_111_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_112 0x02b0
+#define regMP1_SMN_C2PMSG_112_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_113 0x02b1
+#define regMP1_SMN_C2PMSG_113_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_114 0x02b2
+#define regMP1_SMN_C2PMSG_114_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_115 0x02b3
+#define regMP1_SMN_C2PMSG_115_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_116 0x02b4
+#define regMP1_SMN_C2PMSG_116_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_117 0x02b5
+#define regMP1_SMN_C2PMSG_117_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_118 0x02b6
+#define regMP1_SMN_C2PMSG_118_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_119 0x02b7
+#define regMP1_SMN_C2PMSG_119_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_120 0x02b8
+#define regMP1_SMN_C2PMSG_120_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_121 0x02b9
+#define regMP1_SMN_C2PMSG_121_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_122 0x02ba
+#define regMP1_SMN_C2PMSG_122_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_123 0x02bb
+#define regMP1_SMN_C2PMSG_123_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_124 0x02bc
+#define regMP1_SMN_C2PMSG_124_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_125 0x02bd
+#define regMP1_SMN_C2PMSG_125_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_126 0x02be
+#define regMP1_SMN_C2PMSG_126_BASE_IDX 0
+#define regMP1_SMN_C2PMSG_127 0x02bf
+#define regMP1_SMN_C2PMSG_127_BASE_IDX 0
+#define regMP1_SMN_IH_CREDIT 0x02c1
+#define regMP1_SMN_IH_CREDIT_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT 0x02c2
+#define regMP1_SMN_IH_SW_INT_BASE_IDX 0
+#define regMP1_SMN_IH_SW_INT_CTRL 0x02c3
+#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
+#define regMP1_SMN_FPS_CNT 0x02c4
+#define regMP1_SMN_FPS_CNT_BASE_IDX 0
+#define regMP1_SMN_PUB_CTRL 0x02c5
+#define regMP1_SMN_PUB_CTRL_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH0 0x0340
+#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH1 0x0341
+#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH2 0x0342
+#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH3 0x0343
+#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH4 0x0344
+#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH5 0x0345
+#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH6 0x0346
+#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH7 0x0347
+#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH8 0x0348
+#define regMP1_SMN_EXT_SCRATCH8_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH10 0x034a
+#define regMP1_SMN_EXT_SCRATCH10_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH11 0x034b
+#define regMP1_SMN_EXT_SCRATCH11_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH12 0x034c
+#define regMP1_SMN_EXT_SCRATCH12_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH13 0x034d
+#define regMP1_SMN_EXT_SCRATCH13_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH14 0x034e
+#define regMP1_SMN_EXT_SCRATCH14_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH15 0x034f
+#define regMP1_SMN_EXT_SCRATCH15_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH16 0x0350
+#define regMP1_SMN_EXT_SCRATCH16_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH17 0x0351
+#define regMP1_SMN_EXT_SCRATCH17_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH18 0x0352
+#define regMP1_SMN_EXT_SCRATCH18_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH19 0x0353
+#define regMP1_SMN_EXT_SCRATCH19_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH20 0x0354
+#define regMP1_SMN_EXT_SCRATCH20_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH21 0x0355
+#define regMP1_SMN_EXT_SCRATCH21_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH22 0x0356
+#define regMP1_SMN_EXT_SCRATCH22_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH23 0x0357
+#define regMP1_SMN_EXT_SCRATCH23_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH24 0x0358
+#define regMP1_SMN_EXT_SCRATCH24_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH25 0x0359
+#define regMP1_SMN_EXT_SCRATCH25_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH26 0x035a
+#define regMP1_SMN_EXT_SCRATCH26_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH27 0x035b
+#define regMP1_SMN_EXT_SCRATCH27_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH28 0x035c
+#define regMP1_SMN_EXT_SCRATCH28_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH29 0x035d
+#define regMP1_SMN_EXT_SCRATCH29_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH30 0x035e
+#define regMP1_SMN_EXT_SCRATCH30_BASE_IDX 0
+#define regMP1_SMN_EXT_SCRATCH31 0x035f
+#define regMP1_SMN_EXT_SCRATCH31_BASE_IDX 0
+
+
+// addressBlock: aid_mp_SmuMp1Pub_CruDec
+// base address: 0x0
+#define regMP1_FIRMWARE_FLAGS 0xbee00a
+#define regMP1_FIRMWARE_FLAGS_BASE_IDX 0
+
+
+#endif
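
The reg*/*_BASE_IDX pairs in this offset header are resolved against the driver's per-IP base-address tables rather than used as absolute addresses. A hedged sketch of the usual SOC15-style lookup follows; the table layout here is a simplified stand-in for amdgpu's reg_offset arrays, shown only to illustrate how the _BASE_IDX token is pasted and indexed:

/* Sketch: resolve an offset/BASE_IDX pair to a final register offset,
 * mirroring the SOC15_REG_OFFSET() pattern in amdgpu. The reg_offset
 * table shape below is a simplified assumption for illustration. */
#define SKETCH_REG_OFFSET(adev, ip, inst, reg) \
        ((adev)->reg_offset[ip][inst][reg##_BASE_IDX] + reg)

/* Usage sketch:
 *   SKETCH_REG_OFFSET(adev, MP1_HWIP, 0, regMP1_SMN_C2PMSG_90)
 * expands to
 *   adev->reg_offset[MP1_HWIP][0][regMP1_SMN_C2PMSG_90_BASE_IDX]
 *       + regMP1_SMN_C2PMSG_90
 */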
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h
new file mode 100644
index 000000000000..780d9824d5ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h
@@ -0,0 +1,674 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _mp_13_0_6_SH_MASK_HEADER
+#define _mp_13_0_6_SH_MASK_HEADER
+
+
+// addressBlock: aid_mp_SmuMp0_SmnDec
+//MP0_SMN_C2PMSG_32
+#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_33
+#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_34
+#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_35
+#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_36
+#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_37
+#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_38
+#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_39
+#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_40
+#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_41
+#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_42
+#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_43
+#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_44
+#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_45
+#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_46
+#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_47
+#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_48
+#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_49
+#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_50
+#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_51
+#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_52
+#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_53
+#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_54
+#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_55
+#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_56
+#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_57
+#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_58
+#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_59
+#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_60
+#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_61
+#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_62
+#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_63
+#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_64
+#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_65
+#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_66
+#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_67
+#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_68
+#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_69
+#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_70
+#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_71
+#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_72
+#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_73
+#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_74
+#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_75
+#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_76
+#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_77
+#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_78
+#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_79
+#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_80
+#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_81
+#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_82
+#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_83
+#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_84
+#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_85
+#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_86
+#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_87
+#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_88
+#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_89
+#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_90
+#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_91
+#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_92
+#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_93
+#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_94
+#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_95
+#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_96
+#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_97
+#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_98
+#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_99
+#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_100
+#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_101
+#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_102
+#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_C2PMSG_103
+#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP0_SMN_IH_CREDIT
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP0_SMN_IH_SW_INT
+#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP0_SMN_IH_SW_INT_CTRL
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+
+
+// addressBlock: aid_mp_SmuMp1_SmnDec
+//MP1_SMN_C2PMSG_32
+#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_33
+#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_34
+#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_35
+#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_36
+#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_37
+#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_38
+#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_39
+#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_40
+#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_41
+#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_42
+#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_43
+#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_44
+#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_45
+#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_46
+#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_47
+#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_48
+#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_49
+#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_50
+#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_51
+#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_52
+#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_53
+#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_54
+#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_55
+#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_56
+#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_57
+#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_58
+#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_59
+#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_60
+#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_61
+#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_62
+#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_63
+#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_64
+#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_65
+#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_66
+#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_67
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_68
+#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_69
+#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_70
+#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_71
+#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_72
+#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_73
+#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_74
+#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_75
+#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_76
+#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_77
+#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_78
+#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_79
+#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_80
+#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_81
+#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_82
+#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_83
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_84
+#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_85
+#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_86
+#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_87
+#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_88
+#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_89
+#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_90
+#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_91
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_92
+#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_93
+#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_94
+#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_95
+#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_96
+#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_97
+#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_98
+#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_99
+#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_100
+#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_101
+#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_102
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_103
+#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_104
+#define MP1_SMN_C2PMSG_104__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_104__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_105
+#define MP1_SMN_C2PMSG_105__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_105__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_106
+#define MP1_SMN_C2PMSG_106__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_106__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_107
+#define MP1_SMN_C2PMSG_107__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_107__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_108
+#define MP1_SMN_C2PMSG_108__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_108__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_109
+#define MP1_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_110
+#define MP1_SMN_C2PMSG_110__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_110__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_111
+#define MP1_SMN_C2PMSG_111__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_111__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_112
+#define MP1_SMN_C2PMSG_112__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_112__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_113
+#define MP1_SMN_C2PMSG_113__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_113__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_114
+#define MP1_SMN_C2PMSG_114__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_114__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_115
+#define MP1_SMN_C2PMSG_115__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_116
+#define MP1_SMN_C2PMSG_116__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_117
+#define MP1_SMN_C2PMSG_117__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_117__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_118
+#define MP1_SMN_C2PMSG_118__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_118__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_119
+#define MP1_SMN_C2PMSG_119__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_119__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_120
+#define MP1_SMN_C2PMSG_120__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_120__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_121
+#define MP1_SMN_C2PMSG_121__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_121__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_122
+#define MP1_SMN_C2PMSG_122__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_122__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_123
+#define MP1_SMN_C2PMSG_123__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_123__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_124
+#define MP1_SMN_C2PMSG_124__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_124__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_125
+#define MP1_SMN_C2PMSG_125__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_125__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_126
+#define MP1_SMN_C2PMSG_126__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_126__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_C2PMSG_127
+#define MP1_SMN_C2PMSG_127__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_127__CONTENT_MASK 0xFFFFFFFFL
+//MP1_SMN_IH_CREDIT
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+//MP1_SMN_IH_SW_INT
+#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+//MP1_SMN_IH_SW_INT_CTRL
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+//MP1_SMN_FPS_CNT
+#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+//MP1_SMN_PUB_CTRL
+#define MP1_SMN_PUB_CTRL__LX3_RESET__SHIFT 0x0
+#define MP1_SMN_PUB_CTRL__LX3_RESET_MASK 0x00000001L
+//MP1_SMN_EXT_SCRATCH0
+#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH1
+#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH2
+#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH3
+#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH4
+#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH5
+#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH6
+#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH7
+#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH8
+#define MP1_SMN_EXT_SCRATCH8__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH10
+#define MP1_SMN_EXT_SCRATCH10__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH10__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH11
+#define MP1_SMN_EXT_SCRATCH11__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH11__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH12
+#define MP1_SMN_EXT_SCRATCH12__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH12__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH13
+#define MP1_SMN_EXT_SCRATCH13__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH13__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH14
+#define MP1_SMN_EXT_SCRATCH14__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH14__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH15
+#define MP1_SMN_EXT_SCRATCH15__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH15__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH16
+#define MP1_SMN_EXT_SCRATCH16__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH16__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH17
+#define MP1_SMN_EXT_SCRATCH17__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH17__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH18
+#define MP1_SMN_EXT_SCRATCH18__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH18__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH19
+#define MP1_SMN_EXT_SCRATCH19__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH19__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH20
+#define MP1_SMN_EXT_SCRATCH20__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH20__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH21
+#define MP1_SMN_EXT_SCRATCH21__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH21__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH22
+#define MP1_SMN_EXT_SCRATCH22__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH22__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH23
+#define MP1_SMN_EXT_SCRATCH23__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH23__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH24
+#define MP1_SMN_EXT_SCRATCH24__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH24__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH25
+#define MP1_SMN_EXT_SCRATCH25__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH25__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH26
+#define MP1_SMN_EXT_SCRATCH26__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH26__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH27
+#define MP1_SMN_EXT_SCRATCH27__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH27__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH28
+#define MP1_SMN_EXT_SCRATCH28__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH28__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH29
+#define MP1_SMN_EXT_SCRATCH29__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH29__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH30
+#define MP1_SMN_EXT_SCRATCH30__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH30__DATA_MASK 0xFFFFFFFFL
+//MP1_SMN_EXT_SCRATCH31
+#define MP1_SMN_EXT_SCRATCH31__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH31__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_mp_SmuMp1Pub_CruDec
+//MP1_FIRMWARE_FLAGS
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
+#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
+
+
+#endif
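
Taken together, the mp_13_0_6 offset and sh_mask headers support checks such as testing the MP1 firmware-ready bit. A minimal sketch, assuming the caller has already read the register by whatever aperture the ASIC requires (the read path itself varies and is deliberately left out):

/* Sketch: test MP1_FIRMWARE_FLAGS.INTERRUPTS_ENABLED on a raw register
 * value, pairing the offset header's register with the sh_mask header's
 * field macros. The helper name is illustrative, not part of this patch. */
static inline bool sketch_mp1_interrupts_enabled(unsigned int flags)
{
        return (flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
               MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT;
}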
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h
new file mode 100644
index 000000000000..31bef0776ded
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h
@@ -0,0 +1,1109 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _sdma_4_4_2_OFFSET_HEADER
+#define _sdma_4_4_2_OFFSET_HEADER
+
+
+
+// addressBlock: aid_sdma_insts_sdma0_sdmadec
+// base address: 0x4980
+#define regSDMA_UCODE_ADDR 0x0000
+#define regSDMA_UCODE_ADDR_BASE_IDX 0
+#define regSDMA_UCODE_DATA 0x0001
+#define regSDMA_UCODE_DATA_BASE_IDX 0
+#define regSDMA_F32_CNTL 0x0002
+#define regSDMA_F32_CNTL_BASE_IDX 0
+#define regSDMA_MMHUB_CNTL 0x0005
+#define regSDMA_MMHUB_CNTL_BASE_IDX 0
+#define regSDMA_MMHUB_TRUSTLVL 0x0006
+#define regSDMA_MMHUB_TRUSTLVL_BASE_IDX 0
+#define regSDMA_VM_CNTL 0x0010
+#define regSDMA_VM_CNTL_BASE_IDX 0
+#define regSDMA_VM_CTX_LO 0x0011
+#define regSDMA_VM_CTX_LO_BASE_IDX 0
+#define regSDMA_VM_CTX_HI 0x0012
+#define regSDMA_VM_CTX_HI_BASE_IDX 0
+#define regSDMA_ACTIVE_FCN_ID 0x0013
+#define regSDMA_ACTIVE_FCN_ID_BASE_IDX 0
+#define regSDMA_VM_CTX_CNTL 0x0014
+#define regSDMA_VM_CTX_CNTL_BASE_IDX 0
+#define regSDMA_VIRT_RESET_REQ 0x0015
+#define regSDMA_VIRT_RESET_REQ_BASE_IDX 0
+#define regSDMA_VF_ENABLE 0x0016
+#define regSDMA_VF_ENABLE_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE0 0x0017
+#define regSDMA_CONTEXT_REG_TYPE0_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE1 0x0018
+#define regSDMA_CONTEXT_REG_TYPE1_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE2 0x0019
+#define regSDMA_CONTEXT_REG_TYPE2_BASE_IDX 0
+#define regSDMA_CONTEXT_REG_TYPE3 0x001a
+#define regSDMA_CONTEXT_REG_TYPE3_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE0 0x001b
+#define regSDMA_PUB_REG_TYPE0_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE1 0x001c
+#define regSDMA_PUB_REG_TYPE1_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE2 0x001d
+#define regSDMA_PUB_REG_TYPE2_BASE_IDX 0
+#define regSDMA_PUB_REG_TYPE3 0x001e
+#define regSDMA_PUB_REG_TYPE3_BASE_IDX 0
+#define regSDMA_CONTEXT_GROUP_BOUNDARY 0x001f
+#define regSDMA_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0
+#define regSDMA_RB_RPTR_FETCH_HI 0x0020
+#define regSDMA_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regSDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x0021
+#define regSDMA_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regSDMA_RB_RPTR_FETCH 0x0022
+#define regSDMA_RB_RPTR_FETCH_BASE_IDX 0
+#define regSDMA_IB_OFFSET_FETCH 0x0023
+#define regSDMA_IB_OFFSET_FETCH_BASE_IDX 0
+#define regSDMA_PROGRAM 0x0024
+#define regSDMA_PROGRAM_BASE_IDX 0
+#define regSDMA_STATUS_REG 0x0025
+#define regSDMA_STATUS_REG_BASE_IDX 0
+#define regSDMA_STATUS1_REG 0x0026
+#define regSDMA_STATUS1_REG_BASE_IDX 0
+#define regSDMA_RD_BURST_CNTL 0x0027
+#define regSDMA_RD_BURST_CNTL_BASE_IDX 0
+#define regSDMA_HBM_PAGE_CONFIG 0x0028
+#define regSDMA_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regSDMA_UCODE_CHECKSUM 0x0029
+#define regSDMA_UCODE_CHECKSUM_BASE_IDX 0
+#define regSDMA_FREEZE 0x002b
+#define regSDMA_FREEZE_BASE_IDX 0
+#define regSDMA_PHASE0_QUANTUM 0x002c
+#define regSDMA_PHASE0_QUANTUM_BASE_IDX 0
+#define regSDMA_PHASE1_QUANTUM 0x002d
+#define regSDMA_PHASE1_QUANTUM_BASE_IDX 0
+#define regSDMA_POWER_GATING 0x002e
+#define regSDMA_POWER_GATING_BASE_IDX 0
+#define regSDMA_PGFSM_CONFIG 0x002f
+#define regSDMA_PGFSM_CONFIG_BASE_IDX 0
+#define regSDMA_PGFSM_WRITE 0x0030
+#define regSDMA_PGFSM_WRITE_BASE_IDX 0
+#define regSDMA_PGFSM_READ 0x0031
+#define regSDMA_PGFSM_READ_BASE_IDX 0
+#define regCC_SDMA_EDC_CONFIG 0x0032
+#define regCC_SDMA_EDC_CONFIG_BASE_IDX 0
+#define regSDMA_BA_THRESHOLD 0x0033
+#define regSDMA_BA_THRESHOLD_BASE_IDX 0
+#define regSDMA_ID 0x0034
+#define regSDMA_ID_BASE_IDX 0
+#define regSDMA_VERSION 0x0035
+#define regSDMA_VERSION_BASE_IDX 0
+#define regSDMA_EDC_COUNTER 0x0036
+#define regSDMA_EDC_COUNTER_BASE_IDX 0
+#define regSDMA_EDC_COUNTER2 0x0037
+#define regSDMA_EDC_COUNTER2_BASE_IDX 0
+#define regSDMA_STATUS2_REG 0x0038
+#define regSDMA_STATUS2_REG_BASE_IDX 0
+#define regSDMA_ATOMIC_CNTL 0x0039
+#define regSDMA_ATOMIC_CNTL_BASE_IDX 0
+#define regSDMA_ATOMIC_PREOP_LO 0x003a
+#define regSDMA_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regSDMA_ATOMIC_PREOP_HI 0x003b
+#define regSDMA_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regSDMA_UTCL1_CNTL 0x003c
+#define regSDMA_UTCL1_CNTL_BASE_IDX 0
+#define regSDMA_UTCL1_WATERMK 0x003d
+#define regSDMA_UTCL1_WATERMK_BASE_IDX 0
+#define regSDMA_UTCL1_RD_STATUS 0x003e
+#define regSDMA_UTCL1_RD_STATUS_BASE_IDX 0
+#define regSDMA_UTCL1_WR_STATUS 0x003f
+#define regSDMA_UTCL1_WR_STATUS_BASE_IDX 0
+#define regSDMA_UTCL1_INV0 0x0040
+#define regSDMA_UTCL1_INV0_BASE_IDX 0
+#define regSDMA_UTCL1_INV1 0x0041
+#define regSDMA_UTCL1_INV1_BASE_IDX 0
+#define regSDMA_UTCL1_INV2 0x0042
+#define regSDMA_UTCL1_INV2_BASE_IDX 0
+#define regSDMA_UTCL1_RD_XNACK0 0x0043
+#define regSDMA_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regSDMA_UTCL1_RD_XNACK1 0x0044
+#define regSDMA_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regSDMA_UTCL1_WR_XNACK0 0x0045
+#define regSDMA_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regSDMA_UTCL1_WR_XNACK1 0x0046
+#define regSDMA_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regSDMA_UTCL1_TIMEOUT 0x0047
+#define regSDMA_UTCL1_TIMEOUT_BASE_IDX 0
+#define regSDMA_UTCL1_PAGE 0x0048
+#define regSDMA_UTCL1_PAGE_BASE_IDX 0
+#define regSDMA_POWER_CNTL_IDLE 0x0049
+#define regSDMA_POWER_CNTL_IDLE_BASE_IDX 0
+#define regSDMA_RELAX_ORDERING_LUT 0x004a
+#define regSDMA_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regSDMA_CHICKEN_BITS_2 0x004b
+#define regSDMA_CHICKEN_BITS_2_BASE_IDX 0
+#define regSDMA_STATUS3_REG 0x004c
+#define regSDMA_STATUS3_REG_BASE_IDX 0
+#define regSDMA_PHYSICAL_ADDR_LO 0x004d
+#define regSDMA_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regSDMA_PHYSICAL_ADDR_HI 0x004e
+#define regSDMA_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regSDMA_PHASE2_QUANTUM 0x004f
+#define regSDMA_PHASE2_QUANTUM_BASE_IDX 0
+#define regSDMA_ERROR_LOG 0x0050
+#define regSDMA_ERROR_LOG_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG0 0x0051
+#define regSDMA_PUB_DUMMY_REG0_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG1 0x0052
+#define regSDMA_PUB_DUMMY_REG1_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG2 0x0053
+#define regSDMA_PUB_DUMMY_REG2_BASE_IDX 0
+#define regSDMA_PUB_DUMMY_REG3 0x0054
+#define regSDMA_PUB_DUMMY_REG3_BASE_IDX 0
+#define regSDMA_F32_COUNTER 0x0055
+#define regSDMA_F32_COUNTER_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER0_CFG 0x0057
+#define regSDMA_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER1_CFG 0x0058
+#define regSDMA_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x0059
+#define regSDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regSDMA_PERFCNT_MISC_CNTL 0x005a
+#define regSDMA_PERFCNT_MISC_CNTL_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER_LO 0x005b
+#define regSDMA_PERFCNT_PERFCOUNTER_LO_BASE_IDX 0
+#define regSDMA_PERFCNT_PERFCOUNTER_HI 0x005c
+#define regSDMA_PERFCNT_PERFCOUNTER_HI_BASE_IDX 0
+#define regSDMA_CRD_CNTL 0x005d
+#define regSDMA_CRD_CNTL_BASE_IDX 0
+#define regSDMA_GPU_IOV_VIOLATION_LOG 0x005e
+#define regSDMA_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regSDMA_ULV_CNTL 0x005f
+#define regSDMA_ULV_CNTL_BASE_IDX 0
+#define regSDMA_EA_DBIT_ADDR_DATA 0x0060
+#define regSDMA_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regSDMA_EA_DBIT_ADDR_INDEX 0x0061
+#define regSDMA_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regSDMA_GPU_IOV_VIOLATION_LOG2 0x0062
+#define regSDMA_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regSDMA_STATUS4_REG 0x0063
+#define regSDMA_STATUS4_REG_BASE_IDX 0
+#define regSDMA_SCRATCH_RAM_DATA 0x0064
+#define regSDMA_SCRATCH_RAM_DATA_BASE_IDX 0
+#define regSDMA_SCRATCH_RAM_ADDR 0x0065
+#define regSDMA_SCRATCH_RAM_ADDR_BASE_IDX 0
+#define regSDMA_CE_CTRL 0x0066
+#define regSDMA_CE_CTRL_BASE_IDX 0
+#define regSDMA_RAS_STATUS 0x0067
+#define regSDMA_RAS_STATUS_BASE_IDX 0
+#define regSDMA_CLK_STATUS 0x0068
+#define regSDMA_CLK_STATUS_BASE_IDX 0
+#define regSDMA_POWER_CNTL 0x006b
+#define regSDMA_POWER_CNTL_BASE_IDX 0
+#define regSDMA_CLK_CTRL 0x006c
+#define regSDMA_CLK_CTRL_BASE_IDX 0
+#define regSDMA_CNTL 0x006d
+#define regSDMA_CNTL_BASE_IDX 0
+#define regSDMA_CHICKEN_BITS 0x006e
+#define regSDMA_CHICKEN_BITS_BASE_IDX 0
+#define regSDMA_GB_ADDR_CONFIG 0x006f
+#define regSDMA_GB_ADDR_CONFIG_BASE_IDX 0
+#define regSDMA_GB_ADDR_CONFIG_READ 0x0070
+#define regSDMA_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regSDMA_GFX_RB_CNTL 0x0080
+#define regSDMA_GFX_RB_CNTL_BASE_IDX 0
+#define regSDMA_GFX_RB_BASE 0x0081
+#define regSDMA_GFX_RB_BASE_BASE_IDX 0
+#define regSDMA_GFX_RB_BASE_HI 0x0082
+#define regSDMA_GFX_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR 0x0083
+#define regSDMA_GFX_RB_RPTR_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR_HI 0x0084
+#define regSDMA_GFX_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR 0x0085
+#define regSDMA_GFX_RB_WPTR_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_HI 0x0086
+#define regSDMA_GFX_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_POLL_CNTL 0x0087
+#define regSDMA_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR_ADDR_HI 0x0088
+#define regSDMA_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_RPTR_ADDR_LO 0x0089
+#define regSDMA_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_GFX_IB_CNTL 0x008a
+#define regSDMA_GFX_IB_CNTL_BASE_IDX 0
+#define regSDMA_GFX_IB_RPTR 0x008b
+#define regSDMA_GFX_IB_RPTR_BASE_IDX 0
+#define regSDMA_GFX_IB_OFFSET 0x008c
+#define regSDMA_GFX_IB_OFFSET_BASE_IDX 0
+#define regSDMA_GFX_IB_BASE_LO 0x008d
+#define regSDMA_GFX_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_GFX_IB_BASE_HI 0x008e
+#define regSDMA_GFX_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_GFX_IB_SIZE 0x008f
+#define regSDMA_GFX_IB_SIZE_BASE_IDX 0
+#define regSDMA_GFX_SKIP_CNTL 0x0090
+#define regSDMA_GFX_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_GFX_CONTEXT_STATUS 0x0091
+#define regSDMA_GFX_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_GFX_DOORBELL 0x0092
+#define regSDMA_GFX_DOORBELL_BASE_IDX 0
+#define regSDMA_GFX_CONTEXT_CNTL 0x0093
+#define regSDMA_GFX_CONTEXT_CNTL_BASE_IDX 0
+#define regSDMA_GFX_STATUS 0x00a8
+#define regSDMA_GFX_STATUS_BASE_IDX 0
+#define regSDMA_GFX_DOORBELL_LOG 0x00a9
+#define regSDMA_GFX_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_GFX_WATERMARK 0x00aa
+#define regSDMA_GFX_WATERMARK_BASE_IDX 0
+#define regSDMA_GFX_DOORBELL_OFFSET 0x00ab
+#define regSDMA_GFX_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_GFX_CSA_ADDR_LO 0x00ac
+#define regSDMA_GFX_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_GFX_CSA_ADDR_HI 0x00ad
+#define regSDMA_GFX_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_GFX_IB_SUB_REMAIN 0x00af
+#define regSDMA_GFX_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_GFX_PREEMPT 0x00b0
+#define regSDMA_GFX_PREEMPT_BASE_IDX 0
+#define regSDMA_GFX_DUMMY_REG 0x00b1
+#define regSDMA_GFX_DUMMY_REG_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define regSDMA_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_GFX_RB_AQL_CNTL 0x00b4
+#define regSDMA_GFX_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_GFX_MINOR_PTR_UPDATE 0x00b5
+#define regSDMA_GFX_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA0 0x00c0
+#define regSDMA_GFX_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA1 0x00c1
+#define regSDMA_GFX_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA2 0x00c2
+#define regSDMA_GFX_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA3 0x00c3
+#define regSDMA_GFX_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA4 0x00c4
+#define regSDMA_GFX_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA5 0x00c5
+#define regSDMA_GFX_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA6 0x00c6
+#define regSDMA_GFX_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA7 0x00c7
+#define regSDMA_GFX_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA8 0x00c8
+#define regSDMA_GFX_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA9 0x00c9
+#define regSDMA_GFX_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_DATA10 0x00ca
+#define regSDMA_GFX_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_GFX_MIDCMD_CNTL 0x00cb
+#define regSDMA_GFX_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_RB_CNTL 0x00d8
+#define regSDMA_PAGE_RB_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_RB_BASE 0x00d9
+#define regSDMA_PAGE_RB_BASE_BASE_IDX 0
+#define regSDMA_PAGE_RB_BASE_HI 0x00da
+#define regSDMA_PAGE_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR 0x00db
+#define regSDMA_PAGE_RB_RPTR_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR_HI 0x00dc
+#define regSDMA_PAGE_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR 0x00dd
+#define regSDMA_PAGE_RB_WPTR_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_HI 0x00de
+#define regSDMA_PAGE_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_POLL_CNTL 0x00df
+#define regSDMA_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR_ADDR_HI 0x00e0
+#define regSDMA_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_RPTR_ADDR_LO 0x00e1
+#define regSDMA_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_PAGE_IB_CNTL 0x00e2
+#define regSDMA_PAGE_IB_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_IB_RPTR 0x00e3
+#define regSDMA_PAGE_IB_RPTR_BASE_IDX 0
+#define regSDMA_PAGE_IB_OFFSET 0x00e4
+#define regSDMA_PAGE_IB_OFFSET_BASE_IDX 0
+#define regSDMA_PAGE_IB_BASE_LO 0x00e5
+#define regSDMA_PAGE_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_PAGE_IB_BASE_HI 0x00e6
+#define regSDMA_PAGE_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_PAGE_IB_SIZE 0x00e7
+#define regSDMA_PAGE_IB_SIZE_BASE_IDX 0
+#define regSDMA_PAGE_SKIP_CNTL 0x00e8
+#define regSDMA_PAGE_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_CONTEXT_STATUS 0x00e9
+#define regSDMA_PAGE_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_PAGE_DOORBELL 0x00ea
+#define regSDMA_PAGE_DOORBELL_BASE_IDX 0
+#define regSDMA_PAGE_STATUS 0x0100
+#define regSDMA_PAGE_STATUS_BASE_IDX 0
+#define regSDMA_PAGE_DOORBELL_LOG 0x0101
+#define regSDMA_PAGE_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_PAGE_WATERMARK 0x0102
+#define regSDMA_PAGE_WATERMARK_BASE_IDX 0
+#define regSDMA_PAGE_DOORBELL_OFFSET 0x0103
+#define regSDMA_PAGE_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_PAGE_CSA_ADDR_LO 0x0104
+#define regSDMA_PAGE_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_PAGE_CSA_ADDR_HI 0x0105
+#define regSDMA_PAGE_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_PAGE_IB_SUB_REMAIN 0x0107
+#define regSDMA_PAGE_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_PAGE_PREEMPT 0x0108
+#define regSDMA_PAGE_PREEMPT_BASE_IDX 0
+#define regSDMA_PAGE_DUMMY_REG 0x0109
+#define regSDMA_PAGE_DUMMY_REG_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b
+#define regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_PAGE_RB_AQL_CNTL 0x010c
+#define regSDMA_PAGE_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_PAGE_MINOR_PTR_UPDATE 0x010d
+#define regSDMA_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA0 0x0118
+#define regSDMA_PAGE_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA1 0x0119
+#define regSDMA_PAGE_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA2 0x011a
+#define regSDMA_PAGE_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA3 0x011b
+#define regSDMA_PAGE_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA4 0x011c
+#define regSDMA_PAGE_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA5 0x011d
+#define regSDMA_PAGE_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA6 0x011e
+#define regSDMA_PAGE_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA7 0x011f
+#define regSDMA_PAGE_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA8 0x0120
+#define regSDMA_PAGE_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA9 0x0121
+#define regSDMA_PAGE_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_DATA10 0x0122
+#define regSDMA_PAGE_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_PAGE_MIDCMD_CNTL 0x0123
+#define regSDMA_PAGE_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_RB_CNTL 0x0130
+#define regSDMA_RLC0_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_RB_BASE 0x0131
+#define regSDMA_RLC0_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC0_RB_BASE_HI 0x0132
+#define regSDMA_RLC0_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR 0x0133
+#define regSDMA_RLC0_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR_HI 0x0134
+#define regSDMA_RLC0_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR 0x0135
+#define regSDMA_RLC0_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_HI 0x0136
+#define regSDMA_RLC0_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_POLL_CNTL 0x0137
+#define regSDMA_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR_ADDR_HI 0x0138
+#define regSDMA_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_RPTR_ADDR_LO 0x0139
+#define regSDMA_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC0_IB_CNTL 0x013a
+#define regSDMA_RLC0_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_IB_RPTR 0x013b
+#define regSDMA_RLC0_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC0_IB_OFFSET 0x013c
+#define regSDMA_RLC0_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC0_IB_BASE_LO 0x013d
+#define regSDMA_RLC0_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC0_IB_BASE_HI 0x013e
+#define regSDMA_RLC0_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC0_IB_SIZE 0x013f
+#define regSDMA_RLC0_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC0_SKIP_CNTL 0x0140
+#define regSDMA_RLC0_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_CONTEXT_STATUS 0x0141
+#define regSDMA_RLC0_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC0_DOORBELL 0x0142
+#define regSDMA_RLC0_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC0_STATUS 0x0158
+#define regSDMA_RLC0_STATUS_BASE_IDX 0
+#define regSDMA_RLC0_DOORBELL_LOG 0x0159
+#define regSDMA_RLC0_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC0_WATERMARK 0x015a
+#define regSDMA_RLC0_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC0_DOORBELL_OFFSET 0x015b
+#define regSDMA_RLC0_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC0_CSA_ADDR_LO 0x015c
+#define regSDMA_RLC0_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC0_CSA_ADDR_HI 0x015d
+#define regSDMA_RLC0_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC0_IB_SUB_REMAIN 0x015f
+#define regSDMA_RLC0_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC0_PREEMPT 0x0160
+#define regSDMA_RLC0_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC0_DUMMY_REG 0x0161
+#define regSDMA_RLC0_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163
+#define regSDMA_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC0_RB_AQL_CNTL 0x0164
+#define regSDMA_RLC0_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC0_MINOR_PTR_UPDATE 0x0165
+#define regSDMA_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA0 0x0170
+#define regSDMA_RLC0_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA1 0x0171
+#define regSDMA_RLC0_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA2 0x0172
+#define regSDMA_RLC0_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA3 0x0173
+#define regSDMA_RLC0_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA4 0x0174
+#define regSDMA_RLC0_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA5 0x0175
+#define regSDMA_RLC0_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA6 0x0176
+#define regSDMA_RLC0_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA7 0x0177
+#define regSDMA_RLC0_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA8 0x0178
+#define regSDMA_RLC0_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA9 0x0179
+#define regSDMA_RLC0_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_DATA10 0x017a
+#define regSDMA_RLC0_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC0_MIDCMD_CNTL 0x017b
+#define regSDMA_RLC0_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_RB_CNTL 0x0188
+#define regSDMA_RLC1_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_RB_BASE 0x0189
+#define regSDMA_RLC1_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC1_RB_BASE_HI 0x018a
+#define regSDMA_RLC1_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR 0x018b
+#define regSDMA_RLC1_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR_HI 0x018c
+#define regSDMA_RLC1_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR 0x018d
+#define regSDMA_RLC1_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_HI 0x018e
+#define regSDMA_RLC1_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_POLL_CNTL 0x018f
+#define regSDMA_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR_ADDR_HI 0x0190
+#define regSDMA_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_RPTR_ADDR_LO 0x0191
+#define regSDMA_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC1_IB_CNTL 0x0192
+#define regSDMA_RLC1_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_IB_RPTR 0x0193
+#define regSDMA_RLC1_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC1_IB_OFFSET 0x0194
+#define regSDMA_RLC1_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC1_IB_BASE_LO 0x0195
+#define regSDMA_RLC1_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC1_IB_BASE_HI 0x0196
+#define regSDMA_RLC1_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC1_IB_SIZE 0x0197
+#define regSDMA_RLC1_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC1_SKIP_CNTL 0x0198
+#define regSDMA_RLC1_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_CONTEXT_STATUS 0x0199
+#define regSDMA_RLC1_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC1_DOORBELL 0x019a
+#define regSDMA_RLC1_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC1_STATUS 0x01b0
+#define regSDMA_RLC1_STATUS_BASE_IDX 0
+#define regSDMA_RLC1_DOORBELL_LOG 0x01b1
+#define regSDMA_RLC1_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC1_WATERMARK 0x01b2
+#define regSDMA_RLC1_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC1_DOORBELL_OFFSET 0x01b3
+#define regSDMA_RLC1_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC1_CSA_ADDR_LO 0x01b4
+#define regSDMA_RLC1_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC1_CSA_ADDR_HI 0x01b5
+#define regSDMA_RLC1_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC1_IB_SUB_REMAIN 0x01b7
+#define regSDMA_RLC1_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC1_PREEMPT 0x01b8
+#define regSDMA_RLC1_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC1_DUMMY_REG 0x01b9
+#define regSDMA_RLC1_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define regSDMA_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC1_RB_AQL_CNTL 0x01bc
+#define regSDMA_RLC1_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC1_MINOR_PTR_UPDATE 0x01bd
+#define regSDMA_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA0 0x01c8
+#define regSDMA_RLC1_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA1 0x01c9
+#define regSDMA_RLC1_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA2 0x01ca
+#define regSDMA_RLC1_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA3 0x01cb
+#define regSDMA_RLC1_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA4 0x01cc
+#define regSDMA_RLC1_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA5 0x01cd
+#define regSDMA_RLC1_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA6 0x01ce
+#define regSDMA_RLC1_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA7 0x01cf
+#define regSDMA_RLC1_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA8 0x01d0
+#define regSDMA_RLC1_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA9 0x01d1
+#define regSDMA_RLC1_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_DATA10 0x01d2
+#define regSDMA_RLC1_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC1_MIDCMD_CNTL 0x01d3
+#define regSDMA_RLC1_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_RB_CNTL 0x01e0
+#define regSDMA_RLC2_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_RB_BASE 0x01e1
+#define regSDMA_RLC2_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC2_RB_BASE_HI 0x01e2
+#define regSDMA_RLC2_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR 0x01e3
+#define regSDMA_RLC2_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR_HI 0x01e4
+#define regSDMA_RLC2_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR 0x01e5
+#define regSDMA_RLC2_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_HI 0x01e6
+#define regSDMA_RLC2_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_POLL_CNTL 0x01e7
+#define regSDMA_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR_ADDR_HI 0x01e8
+#define regSDMA_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_RPTR_ADDR_LO 0x01e9
+#define regSDMA_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC2_IB_CNTL 0x01ea
+#define regSDMA_RLC2_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_IB_RPTR 0x01eb
+#define regSDMA_RLC2_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC2_IB_OFFSET 0x01ec
+#define regSDMA_RLC2_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC2_IB_BASE_LO 0x01ed
+#define regSDMA_RLC2_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC2_IB_BASE_HI 0x01ee
+#define regSDMA_RLC2_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC2_IB_SIZE 0x01ef
+#define regSDMA_RLC2_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC2_SKIP_CNTL 0x01f0
+#define regSDMA_RLC2_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_CONTEXT_STATUS 0x01f1
+#define regSDMA_RLC2_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC2_DOORBELL 0x01f2
+#define regSDMA_RLC2_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC2_STATUS 0x0208
+#define regSDMA_RLC2_STATUS_BASE_IDX 0
+#define regSDMA_RLC2_DOORBELL_LOG 0x0209
+#define regSDMA_RLC2_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC2_WATERMARK 0x020a
+#define regSDMA_RLC2_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC2_DOORBELL_OFFSET 0x020b
+#define regSDMA_RLC2_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC2_CSA_ADDR_LO 0x020c
+#define regSDMA_RLC2_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC2_CSA_ADDR_HI 0x020d
+#define regSDMA_RLC2_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC2_IB_SUB_REMAIN 0x020f
+#define regSDMA_RLC2_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC2_PREEMPT 0x0210
+#define regSDMA_RLC2_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC2_DUMMY_REG 0x0211
+#define regSDMA_RLC2_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213
+#define regSDMA_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC2_RB_AQL_CNTL 0x0214
+#define regSDMA_RLC2_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC2_MINOR_PTR_UPDATE 0x0215
+#define regSDMA_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA0 0x0220
+#define regSDMA_RLC2_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA1 0x0221
+#define regSDMA_RLC2_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA2 0x0222
+#define regSDMA_RLC2_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA3 0x0223
+#define regSDMA_RLC2_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA4 0x0224
+#define regSDMA_RLC2_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA5 0x0225
+#define regSDMA_RLC2_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA6 0x0226
+#define regSDMA_RLC2_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA7 0x0227
+#define regSDMA_RLC2_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA8 0x0228
+#define regSDMA_RLC2_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA9 0x0229
+#define regSDMA_RLC2_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_DATA10 0x022a
+#define regSDMA_RLC2_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC2_MIDCMD_CNTL 0x022b
+#define regSDMA_RLC2_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_RB_CNTL 0x0238
+#define regSDMA_RLC3_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_RB_BASE 0x0239
+#define regSDMA_RLC3_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC3_RB_BASE_HI 0x023a
+#define regSDMA_RLC3_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR 0x023b
+#define regSDMA_RLC3_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR_HI 0x023c
+#define regSDMA_RLC3_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR 0x023d
+#define regSDMA_RLC3_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_HI 0x023e
+#define regSDMA_RLC3_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_POLL_CNTL 0x023f
+#define regSDMA_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR_ADDR_HI 0x0240
+#define regSDMA_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_RPTR_ADDR_LO 0x0241
+#define regSDMA_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC3_IB_CNTL 0x0242
+#define regSDMA_RLC3_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_IB_RPTR 0x0243
+#define regSDMA_RLC3_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC3_IB_OFFSET 0x0244
+#define regSDMA_RLC3_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC3_IB_BASE_LO 0x0245
+#define regSDMA_RLC3_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC3_IB_BASE_HI 0x0246
+#define regSDMA_RLC3_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC3_IB_SIZE 0x0247
+#define regSDMA_RLC3_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC3_SKIP_CNTL 0x0248
+#define regSDMA_RLC3_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_CONTEXT_STATUS 0x0249
+#define regSDMA_RLC3_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC3_DOORBELL 0x024a
+#define regSDMA_RLC3_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC3_STATUS 0x0260
+#define regSDMA_RLC3_STATUS_BASE_IDX 0
+#define regSDMA_RLC3_DOORBELL_LOG 0x0261
+#define regSDMA_RLC3_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC3_WATERMARK 0x0262
+#define regSDMA_RLC3_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC3_DOORBELL_OFFSET 0x0263
+#define regSDMA_RLC3_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC3_CSA_ADDR_LO 0x0264
+#define regSDMA_RLC3_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC3_CSA_ADDR_HI 0x0265
+#define regSDMA_RLC3_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC3_IB_SUB_REMAIN 0x0267
+#define regSDMA_RLC3_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC3_PREEMPT 0x0268
+#define regSDMA_RLC3_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC3_DUMMY_REG 0x0269
+#define regSDMA_RLC3_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b
+#define regSDMA_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC3_RB_AQL_CNTL 0x026c
+#define regSDMA_RLC3_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC3_MINOR_PTR_UPDATE 0x026d
+#define regSDMA_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA0 0x0278
+#define regSDMA_RLC3_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA1 0x0279
+#define regSDMA_RLC3_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA2 0x027a
+#define regSDMA_RLC3_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA3 0x027b
+#define regSDMA_RLC3_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA4 0x027c
+#define regSDMA_RLC3_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA5 0x027d
+#define regSDMA_RLC3_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA6 0x027e
+#define regSDMA_RLC3_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA7 0x027f
+#define regSDMA_RLC3_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA8 0x0280
+#define regSDMA_RLC3_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA9 0x0281
+#define regSDMA_RLC3_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_DATA10 0x0282
+#define regSDMA_RLC3_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC3_MIDCMD_CNTL 0x0283
+#define regSDMA_RLC3_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_RB_CNTL 0x0290
+#define regSDMA_RLC4_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_RB_BASE 0x0291
+#define regSDMA_RLC4_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC4_RB_BASE_HI 0x0292
+#define regSDMA_RLC4_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR 0x0293
+#define regSDMA_RLC4_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR_HI 0x0294
+#define regSDMA_RLC4_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR 0x0295
+#define regSDMA_RLC4_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_HI 0x0296
+#define regSDMA_RLC4_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_POLL_CNTL 0x0297
+#define regSDMA_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR_ADDR_HI 0x0298
+#define regSDMA_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_RPTR_ADDR_LO 0x0299
+#define regSDMA_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC4_IB_CNTL 0x029a
+#define regSDMA_RLC4_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_IB_RPTR 0x029b
+#define regSDMA_RLC4_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC4_IB_OFFSET 0x029c
+#define regSDMA_RLC4_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC4_IB_BASE_LO 0x029d
+#define regSDMA_RLC4_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC4_IB_BASE_HI 0x029e
+#define regSDMA_RLC4_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC4_IB_SIZE 0x029f
+#define regSDMA_RLC4_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC4_SKIP_CNTL 0x02a0
+#define regSDMA_RLC4_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_CONTEXT_STATUS 0x02a1
+#define regSDMA_RLC4_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC4_DOORBELL 0x02a2
+#define regSDMA_RLC4_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC4_STATUS 0x02b8
+#define regSDMA_RLC4_STATUS_BASE_IDX 0
+#define regSDMA_RLC4_DOORBELL_LOG 0x02b9
+#define regSDMA_RLC4_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC4_WATERMARK 0x02ba
+#define regSDMA_RLC4_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC4_DOORBELL_OFFSET 0x02bb
+#define regSDMA_RLC4_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC4_CSA_ADDR_LO 0x02bc
+#define regSDMA_RLC4_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC4_CSA_ADDR_HI 0x02bd
+#define regSDMA_RLC4_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC4_IB_SUB_REMAIN 0x02bf
+#define regSDMA_RLC4_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC4_PREEMPT 0x02c0
+#define regSDMA_RLC4_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC4_DUMMY_REG 0x02c1
+#define regSDMA_RLC4_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define regSDMA_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC4_RB_AQL_CNTL 0x02c4
+#define regSDMA_RLC4_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC4_MINOR_PTR_UPDATE 0x02c5
+#define regSDMA_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA0 0x02d0
+#define regSDMA_RLC4_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA1 0x02d1
+#define regSDMA_RLC4_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA2 0x02d2
+#define regSDMA_RLC4_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA3 0x02d3
+#define regSDMA_RLC4_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA4 0x02d4
+#define regSDMA_RLC4_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA5 0x02d5
+#define regSDMA_RLC4_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA6 0x02d6
+#define regSDMA_RLC4_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA7 0x02d7
+#define regSDMA_RLC4_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA8 0x02d8
+#define regSDMA_RLC4_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA9 0x02d9
+#define regSDMA_RLC4_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_DATA10 0x02da
+#define regSDMA_RLC4_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC4_MIDCMD_CNTL 0x02db
+#define regSDMA_RLC4_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_RB_CNTL 0x02e8
+#define regSDMA_RLC5_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_RB_BASE 0x02e9
+#define regSDMA_RLC5_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC5_RB_BASE_HI 0x02ea
+#define regSDMA_RLC5_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR 0x02eb
+#define regSDMA_RLC5_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR_HI 0x02ec
+#define regSDMA_RLC5_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR 0x02ed
+#define regSDMA_RLC5_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_HI 0x02ee
+#define regSDMA_RLC5_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_POLL_CNTL 0x02ef
+#define regSDMA_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR_ADDR_HI 0x02f0
+#define regSDMA_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_RPTR_ADDR_LO 0x02f1
+#define regSDMA_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC5_IB_CNTL 0x02f2
+#define regSDMA_RLC5_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_IB_RPTR 0x02f3
+#define regSDMA_RLC5_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC5_IB_OFFSET 0x02f4
+#define regSDMA_RLC5_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC5_IB_BASE_LO 0x02f5
+#define regSDMA_RLC5_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC5_IB_BASE_HI 0x02f6
+#define regSDMA_RLC5_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC5_IB_SIZE 0x02f7
+#define regSDMA_RLC5_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC5_SKIP_CNTL 0x02f8
+#define regSDMA_RLC5_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_CONTEXT_STATUS 0x02f9
+#define regSDMA_RLC5_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC5_DOORBELL 0x02fa
+#define regSDMA_RLC5_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC5_STATUS 0x0310
+#define regSDMA_RLC5_STATUS_BASE_IDX 0
+#define regSDMA_RLC5_DOORBELL_LOG 0x0311
+#define regSDMA_RLC5_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC5_WATERMARK 0x0312
+#define regSDMA_RLC5_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC5_DOORBELL_OFFSET 0x0313
+#define regSDMA_RLC5_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC5_CSA_ADDR_LO 0x0314
+#define regSDMA_RLC5_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC5_CSA_ADDR_HI 0x0315
+#define regSDMA_RLC5_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC5_IB_SUB_REMAIN 0x0317
+#define regSDMA_RLC5_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC5_PREEMPT 0x0318
+#define regSDMA_RLC5_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC5_DUMMY_REG 0x0319
+#define regSDMA_RLC5_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b
+#define regSDMA_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC5_RB_AQL_CNTL 0x031c
+#define regSDMA_RLC5_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC5_MINOR_PTR_UPDATE 0x031d
+#define regSDMA_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA0 0x0328
+#define regSDMA_RLC5_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA1 0x0329
+#define regSDMA_RLC5_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA2 0x032a
+#define regSDMA_RLC5_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA3 0x032b
+#define regSDMA_RLC5_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA4 0x032c
+#define regSDMA_RLC5_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA5 0x032d
+#define regSDMA_RLC5_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA6 0x032e
+#define regSDMA_RLC5_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA7 0x032f
+#define regSDMA_RLC5_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA8 0x0330
+#define regSDMA_RLC5_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA9 0x0331
+#define regSDMA_RLC5_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_DATA10 0x0332
+#define regSDMA_RLC5_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC5_MIDCMD_CNTL 0x0333
+#define regSDMA_RLC5_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_RB_CNTL 0x0340
+#define regSDMA_RLC6_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_RB_BASE 0x0341
+#define regSDMA_RLC6_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC6_RB_BASE_HI 0x0342
+#define regSDMA_RLC6_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR 0x0343
+#define regSDMA_RLC6_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR_HI 0x0344
+#define regSDMA_RLC6_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR 0x0345
+#define regSDMA_RLC6_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_HI 0x0346
+#define regSDMA_RLC6_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_POLL_CNTL 0x0347
+#define regSDMA_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR_ADDR_HI 0x0348
+#define regSDMA_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_RPTR_ADDR_LO 0x0349
+#define regSDMA_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC6_IB_CNTL 0x034a
+#define regSDMA_RLC6_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_IB_RPTR 0x034b
+#define regSDMA_RLC6_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC6_IB_OFFSET 0x034c
+#define regSDMA_RLC6_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC6_IB_BASE_LO 0x034d
+#define regSDMA_RLC6_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC6_IB_BASE_HI 0x034e
+#define regSDMA_RLC6_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC6_IB_SIZE 0x034f
+#define regSDMA_RLC6_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC6_SKIP_CNTL 0x0350
+#define regSDMA_RLC6_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_CONTEXT_STATUS 0x0351
+#define regSDMA_RLC6_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC6_DOORBELL 0x0352
+#define regSDMA_RLC6_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC6_STATUS 0x0368
+#define regSDMA_RLC6_STATUS_BASE_IDX 0
+#define regSDMA_RLC6_DOORBELL_LOG 0x0369
+#define regSDMA_RLC6_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC6_WATERMARK 0x036a
+#define regSDMA_RLC6_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC6_DOORBELL_OFFSET 0x036b
+#define regSDMA_RLC6_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC6_CSA_ADDR_LO 0x036c
+#define regSDMA_RLC6_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC6_CSA_ADDR_HI 0x036d
+#define regSDMA_RLC6_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC6_IB_SUB_REMAIN 0x036f
+#define regSDMA_RLC6_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC6_PREEMPT 0x0370
+#define regSDMA_RLC6_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC6_DUMMY_REG 0x0371
+#define regSDMA_RLC6_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373
+#define regSDMA_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC6_RB_AQL_CNTL 0x0374
+#define regSDMA_RLC6_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC6_MINOR_PTR_UPDATE 0x0375
+#define regSDMA_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA0 0x0380
+#define regSDMA_RLC6_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA1 0x0381
+#define regSDMA_RLC6_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA2 0x0382
+#define regSDMA_RLC6_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA3 0x0383
+#define regSDMA_RLC6_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA4 0x0384
+#define regSDMA_RLC6_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA5 0x0385
+#define regSDMA_RLC6_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA6 0x0386
+#define regSDMA_RLC6_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA7 0x0387
+#define regSDMA_RLC6_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA8 0x0388
+#define regSDMA_RLC6_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA9 0x0389
+#define regSDMA_RLC6_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_DATA10 0x038a
+#define regSDMA_RLC6_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC6_MIDCMD_CNTL 0x038b
+#define regSDMA_RLC6_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_RB_CNTL 0x0398
+#define regSDMA_RLC7_RB_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_RB_BASE 0x0399
+#define regSDMA_RLC7_RB_BASE_BASE_IDX 0
+#define regSDMA_RLC7_RB_BASE_HI 0x039a
+#define regSDMA_RLC7_RB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR 0x039b
+#define regSDMA_RLC7_RB_RPTR_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR_HI 0x039c
+#define regSDMA_RLC7_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR 0x039d
+#define regSDMA_RLC7_RB_WPTR_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_HI 0x039e
+#define regSDMA_RLC7_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_POLL_CNTL 0x039f
+#define regSDMA_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR_ADDR_HI 0x03a0
+#define regSDMA_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_RPTR_ADDR_LO 0x03a1
+#define regSDMA_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC7_IB_CNTL 0x03a2
+#define regSDMA_RLC7_IB_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_IB_RPTR 0x03a3
+#define regSDMA_RLC7_IB_RPTR_BASE_IDX 0
+#define regSDMA_RLC7_IB_OFFSET 0x03a4
+#define regSDMA_RLC7_IB_OFFSET_BASE_IDX 0
+#define regSDMA_RLC7_IB_BASE_LO 0x03a5
+#define regSDMA_RLC7_IB_BASE_LO_BASE_IDX 0
+#define regSDMA_RLC7_IB_BASE_HI 0x03a6
+#define regSDMA_RLC7_IB_BASE_HI_BASE_IDX 0
+#define regSDMA_RLC7_IB_SIZE 0x03a7
+#define regSDMA_RLC7_IB_SIZE_BASE_IDX 0
+#define regSDMA_RLC7_SKIP_CNTL 0x03a8
+#define regSDMA_RLC7_SKIP_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_CONTEXT_STATUS 0x03a9
+#define regSDMA_RLC7_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA_RLC7_DOORBELL 0x03aa
+#define regSDMA_RLC7_DOORBELL_BASE_IDX 0
+#define regSDMA_RLC7_STATUS 0x03c0
+#define regSDMA_RLC7_STATUS_BASE_IDX 0
+#define regSDMA_RLC7_DOORBELL_LOG 0x03c1
+#define regSDMA_RLC7_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA_RLC7_WATERMARK 0x03c2
+#define regSDMA_RLC7_WATERMARK_BASE_IDX 0
+#define regSDMA_RLC7_DOORBELL_OFFSET 0x03c3
+#define regSDMA_RLC7_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA_RLC7_CSA_ADDR_LO 0x03c4
+#define regSDMA_RLC7_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC7_CSA_ADDR_HI 0x03c5
+#define regSDMA_RLC7_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC7_IB_SUB_REMAIN 0x03c7
+#define regSDMA_RLC7_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA_RLC7_PREEMPT 0x03c8
+#define regSDMA_RLC7_PREEMPT_BASE_IDX 0
+#define regSDMA_RLC7_DUMMY_REG 0x03c9
+#define regSDMA_RLC7_DUMMY_REG_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb
+#define regSDMA_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA_RLC7_RB_AQL_CNTL 0x03cc
+#define regSDMA_RLC7_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA_RLC7_MINOR_PTR_UPDATE 0x03cd
+#define regSDMA_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA0 0x03d8
+#define regSDMA_RLC7_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA1 0x03d9
+#define regSDMA_RLC7_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA2 0x03da
+#define regSDMA_RLC7_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA3 0x03db
+#define regSDMA_RLC7_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA4 0x03dc
+#define regSDMA_RLC7_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA5 0x03dd
+#define regSDMA_RLC7_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA6 0x03de
+#define regSDMA_RLC7_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA7 0x03df
+#define regSDMA_RLC7_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA8 0x03e0
+#define regSDMA_RLC7_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA9 0x03e1
+#define regSDMA_RLC7_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_DATA10 0x03e2
+#define regSDMA_RLC7_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA_RLC7_MIDCMD_CNTL 0x03e3
+#define regSDMA_RLC7_MIDCMD_CNTL_BASE_IDX 0
+
+#endif
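
For context: each per-instance dword offset above pairs with its _BASE_IDX companion, which indexes the per-IP base-address table that amdgpu populates at init time (e.g. from IP discovery). A minimal sketch of how such a pair is typically resolved through the SOC15 helpers — the macro is shown per soc15_common.h, while the SDMA0 HWIP name and the inst variable are illustrative assumptions, not part of this patch:

    /* soc15_common.h-style resolution: IP base-table entry + relative offset */
    #define SOC15_REG_OFFSET(ip, inst, reg) \
            (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

    /* e.g. read the GFX ring-buffer control register of SDMA instance inst */
    u32 rb_cntl = RREG32(SOC15_REG_OFFSET(SDMA0, inst, regSDMA_GFX_RB_CNTL));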
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h
new file mode 100644
index 000000000000..e46cb3339355
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h
@@ -0,0 +1,3276 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _sdma_4_4_2_SH_MASK_HEADER
+#define _sdma_4_4_2_SH_MASK_HEADER
+
+
+// addressBlock: aid_sdma_insts_sdma0_sdmadec
+//SDMA_UCODE_ADDR
+#define SDMA_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA_UCODE_ADDR__VALUE_MASK 0x00003FFFL
+//SDMA_UCODE_DATA
+#define SDMA_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA_F32_CNTL
+#define SDMA_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA_F32_CNTL__STEP__SHIFT 0x1
+#define SDMA_F32_CNTL__DBG_SELECT_BITS__SHIFT 0x2
+#define SDMA_F32_CNTL__RESET__SHIFT 0x8
+#define SDMA_F32_CNTL__CHECKSUM_CLR__SHIFT 0x9
+#define SDMA_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA_F32_CNTL__STEP_MASK 0x00000002L
+#define SDMA_F32_CNTL__DBG_SELECT_BITS_MASK 0x000000FCL
+#define SDMA_F32_CNTL__RESET_MASK 0x00000100L
+#define SDMA_F32_CNTL__CHECKSUM_CLR_MASK 0x00000200L
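
Each __SHIFT/_MASK pair in this header is consumed through amdgpu's generic field helpers, which token-paste the register and field names onto these defines. A sketch, assuming the REG_SET_FIELD helper from amdgpu.h plus the SOC15 register accessors, with the inst index being illustrative:

    /* Halt the engine: read-modify-write the HALT field of SDMA_F32_CNTL.
     * REG_SET_FIELD expands to the SDMA_F32_CNTL__HALT__SHIFT / _MASK
     * definitions above.
     */
    u32 f32_cntl = RREG32_SOC15(SDMA0, inst, regSDMA_F32_CNTL);
    f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, 1);
    WREG32_SOC15(SDMA0, inst, regSDMA_F32_CNTL, f32_cntl);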
+//SDMA_MMHUB_CNTL
+#define SDMA_MMHUB_CNTL__UNIT_ID__SHIFT 0x0
+#define SDMA_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL
+//SDMA_MMHUB_TRUSTLVL
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG0__SHIFT 0x0
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG1__SHIFT 0x4
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG2__SHIFT 0x8
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG3__SHIFT 0xc
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG4__SHIFT 0x10
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG5__SHIFT 0x14
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG6__SHIFT 0x18
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG7__SHIFT 0x1c
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG0_MASK 0x0000000FL
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG1_MASK 0x000000F0L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG2_MASK 0x00000F00L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG3_MASK 0x0000F000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG4_MASK 0x000F0000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG5_MASK 0x00F00000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG6_MASK 0x0F000000L
+#define SDMA_MMHUB_TRUSTLVL__SECFLAG7_MASK 0xF0000000L
+//SDMA_VM_CNTL
+#define SDMA_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA_VM_CTX_LO
+#define SDMA_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_VM_CTX_HI
+#define SDMA_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_ACTIVE_FCN_ID
+#define SDMA_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA_VM_CTX_CNTL
+#define SDMA_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+//SDMA_VIRT_RESET_REQ
+#define SDMA_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA_VF_ENABLE
+#define SDMA_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define SDMA_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+//SDMA_CONTEXT_REG_TYPE0
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_CNTL__SHIFT 0x0
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE__SHIFT 0x1
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE_HI__SHIFT 0x2
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR__SHIFT 0x3
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_HI__SHIFT 0x4
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR__SHIFT 0x5
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_HI__SHIFT 0x6
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_CNTL__SHIFT 0xa
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_RPTR__SHIFT 0xb
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_OFFSET__SHIFT 0xc
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_LO__SHIFT 0xd
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_HI__SHIFT 0xe
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_SIZE__SHIFT 0xf
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_SKIP_CNTL__SHIFT 0x10
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_DOORBELL__SHIFT 0x12
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_CNTL__SHIFT 0x13
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_CNTL_MASK 0x00000001L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE_MASK 0x00000002L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_BASE_HI_MASK 0x00000004L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_MASK 0x00000008L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_MASK 0x00000020L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_CNTL_MASK 0x00000400L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_RPTR_MASK 0x00000800L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_OFFSET_MASK 0x00001000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_LO_MASK 0x00002000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_BASE_HI_MASK 0x00004000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_IB_SIZE_MASK 0x00008000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_SKIP_CNTL_MASK 0x00010000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_DOORBELL_MASK 0x00040000L
+#define SDMA_CONTEXT_REG_TYPE0__SDMA_GFX_CONTEXT_CNTL_MASK 0x00080000L
+//SDMA_CONTEXT_REG_TYPE1
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_STATUS__SHIFT 0x8
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_LOG__SHIFT 0x9
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_WATERMARK__SHIFT 0xa
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_PREEMPT__SHIFT 0x10
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DUMMY_REG__SHIFT 0x11
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_STATUS_MASK 0x00000100L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_WATERMARK_MASK 0x00000400L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_PREEMPT_MASK 0x00010000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_DUMMY_REG_MASK 0x00020000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA_CONTEXT_REG_TYPE1__SDMA_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L
+//SDMA_CONTEXT_REG_TYPE2
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA9__SHIFT 0x9
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA10__SHIFT 0xa
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_CNTL__SHIFT 0xb
+#define SDMA_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xe
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA9_MASK 0x00000200L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_DATA10_MASK 0x00000400L
+#define SDMA_CONTEXT_REG_TYPE2__SDMA_GFX_MIDCMD_CNTL_MASK 0x00000800L
+#define SDMA_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFC000L
+//SDMA_CONTEXT_REG_TYPE3
+#define SDMA_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0
+#define SDMA_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL
+//SDMA_PUB_REG_TYPE0
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_ADDR__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_DATA__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE0__SDMA_F32_CNTL__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_CNTL__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_TRUSTLVL__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE0__RESERVED_14_10__SHIFT 0xa
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CNTL__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_LO__SHIFT 0x11
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_HI__SHIFT 0x12
+#define SDMA_PUB_REG_TYPE0__SDMA_ACTIVE_FCN_ID__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_CNTL__SHIFT 0x14
+#define SDMA_PUB_REG_TYPE0__SDMA_VIRT_RESET_REQ__SHIFT 0x15
+#define SDMA_PUB_REG_TYPE0__SDMA_VF_ENABLE__SHIFT 0x16
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE0__SHIFT 0x17
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE1__SHIFT 0x18
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE2__SHIFT 0x19
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE3__SHIFT 0x1a
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE0__SHIFT 0x1b
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE1__SHIFT 0x1c
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE2__SHIFT 0x1d
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE3__SHIFT 0x1e
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_GROUP_BOUNDARY__SHIFT 0x1f
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_ADDR_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE0__SDMA_UCODE_DATA_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE0__SDMA_F32_CNTL_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_CNTL_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE0__SDMA_MMHUB_TRUSTLVL_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE0__RESERVED_14_10_MASK 0x00007C00L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CNTL_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_LO_MASK 0x00020000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_HI_MASK 0x00040000L
+#define SDMA_PUB_REG_TYPE0__SDMA_ACTIVE_FCN_ID_MASK 0x00080000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VM_CTX_CNTL_MASK 0x00100000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VIRT_RESET_REQ_MASK 0x00200000L
+#define SDMA_PUB_REG_TYPE0__SDMA_VF_ENABLE_MASK 0x00400000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE0_MASK 0x00800000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE1_MASK 0x01000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE2_MASK 0x02000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_REG_TYPE3_MASK 0x04000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE0_MASK 0x08000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE1_MASK 0x10000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE2_MASK 0x20000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_PUB_REG_TYPE3_MASK 0x40000000L
+#define SDMA_PUB_REG_TYPE0__SDMA_CONTEXT_GROUP_BOUNDARY_MASK 0x80000000L
+//SDMA_PUB_REG_TYPE1
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH_HI__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE1__SDMA_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE1__SDMA_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA_PUB_REG_TYPE1__SDMA_PROGRAM__SHIFT 0x4
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS_REG__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS1_REG__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE1__SDMA_RD_BURST_CNTL__SHIFT 0x7
+#define SDMA_PUB_REG_TYPE1__SDMA_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA_PUB_REG_TYPE1__SDMA_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA_PUB_REG_TYPE1__RESERVED_10_10__SHIFT 0xa
+#define SDMA_PUB_REG_TYPE1__SDMA_FREEZE__SHIFT 0xb
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE0_QUANTUM__SHIFT 0xc
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE1_QUANTUM__SHIFT 0xd
+#define SDMA_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11
+#define SDMA_PUB_REG_TYPE1__CC_SDMA_EDC_CONFIG__SHIFT 0x12
+#define SDMA_PUB_REG_TYPE1__SDMA_BA_THRESHOLD__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE1__SDMA_ID__SHIFT 0x14
+#define SDMA_PUB_REG_TYPE1__SDMA_VERSION__SHIFT 0x15
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER__SHIFT 0x16
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER2__SHIFT 0x17
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS2_REG__SHIFT 0x18
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_RD_STATUS__SHIFT 0x1e
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WR_STATUS__SHIFT 0x1f
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH_HI_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE1__SDMA_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE1__SDMA_RB_RPTR_FETCH_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE1__SDMA_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA_PUB_REG_TYPE1__SDMA_PROGRAM_MASK 0x00000010L
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS_REG_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS1_REG_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE1__SDMA_RD_BURST_CNTL_MASK 0x00000080L
+#define SDMA_PUB_REG_TYPE1__SDMA_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA_PUB_REG_TYPE1__SDMA_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA_PUB_REG_TYPE1__RESERVED_10_10_MASK 0x00000400L
+#define SDMA_PUB_REG_TYPE1__SDMA_FREEZE_MASK 0x00000800L
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE0_QUANTUM_MASK 0x00001000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PHASE1_QUANTUM_MASK 0x00002000L
+#define SDMA_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L
+#define SDMA_PUB_REG_TYPE1__CC_SDMA_EDC_CONFIG_MASK 0x00040000L
+#define SDMA_PUB_REG_TYPE1__SDMA_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ID_MASK 0x00100000L
+#define SDMA_PUB_REG_TYPE1__SDMA_VERSION_MASK 0x00200000L
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER_MASK 0x00400000L
+#define SDMA_PUB_REG_TYPE1__SDMA_EDC_COUNTER2_MASK 0x00800000L
+#define SDMA_PUB_REG_TYPE1__SDMA_STATUS2_REG_MASK 0x01000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_RD_STATUS_MASK 0x40000000L
+#define SDMA_PUB_REG_TYPE1__SDMA_UTCL1_WR_STATUS_MASK 0x80000000L
+//SDMA_PUB_REG_TYPE2
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV0__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV1__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV2__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK0__SHIFT 0x3
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK1__SHIFT 0x4
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK0__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK1__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_TIMEOUT__SHIFT 0x7
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_PAGE__SHIFT 0x8
+#define SDMA_PUB_REG_TYPE2__SDMA_POWER_CNTL_IDLE__SHIFT 0x9
+#define SDMA_PUB_REG_TYPE2__SDMA_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA_PUB_REG_TYPE2__SDMA_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA_PUB_REG_TYPE2__SDMA_STATUS3_REG__SHIFT 0xc
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA_PUB_REG_TYPE2__SDMA_PHASE2_QUANTUM__SHIFT 0xf
+#define SDMA_PUB_REG_TYPE2__SDMA_ERROR_LOG__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA_PUB_REG_TYPE2__SDMA_F32_COUNTER__SHIFT 0x15
+#define SDMA_PUB_REG_TYPE2__RESERVED_22_22__SHIFT 0x16
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER0_CFG__SHIFT 0x17
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER1_CFG__SHIFT 0x18
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__SHIFT 0x19
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_MISC_CNTL__SHIFT 0x1a
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_LO__SHIFT 0x1b
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_HI__SHIFT 0x1c
+#define SDMA_PUB_REG_TYPE2__SDMA_CRD_CNTL__SHIFT 0x1d
+#define SDMA_PUB_REG_TYPE2__SDMA_GPU_IOV_VIOLATION_LOG__SHIFT 0x1e
+#define SDMA_PUB_REG_TYPE2__SDMA_ULV_CNTL__SHIFT 0x1f
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV0_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV1_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_INV2_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK0_MASK 0x00000008L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_RD_XNACK1_MASK 0x00000010L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK0_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_WR_XNACK1_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_TIMEOUT_MASK 0x00000080L
+#define SDMA_PUB_REG_TYPE2__SDMA_UTCL1_PAGE_MASK 0x00000100L
+#define SDMA_PUB_REG_TYPE2__SDMA_POWER_CNTL_IDLE_MASK 0x00000200L
+#define SDMA_PUB_REG_TYPE2__SDMA_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA_PUB_REG_TYPE2__SDMA_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA_PUB_REG_TYPE2__SDMA_STATUS3_REG_MASK 0x00001000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PHASE2_QUANTUM_MASK 0x00008000L
+#define SDMA_PUB_REG_TYPE2__SDMA_ERROR_LOG_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA_PUB_REG_TYPE2__SDMA_F32_COUNTER_MASK 0x00200000L
+#define SDMA_PUB_REG_TYPE2__RESERVED_22_22_MASK 0x00400000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER0_CFG_MASK 0x00800000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER1_CFG_MASK 0x01000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL_MASK 0x02000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_MISC_CNTL_MASK 0x04000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_LO_MASK 0x08000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_PERFCNT_PERFCOUNTER_HI_MASK 0x10000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_CRD_CNTL_MASK 0x20000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_GPU_IOV_VIOLATION_LOG_MASK 0x40000000L
+#define SDMA_PUB_REG_TYPE2__SDMA_ULV_CNTL_MASK 0x80000000L
+//SDMA_PUB_REG_TYPE3
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA_PUB_REG_TYPE3__SDMA_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2
+#define SDMA_PUB_REG_TYPE3__SDMA_STATUS4_REG__SHIFT 0x3
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_DATA__SHIFT 0x4
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_ADDR__SHIFT 0x5
+#define SDMA_PUB_REG_TYPE3__SDMA_CE_CTRL__SHIFT 0x6
+#define SDMA_PUB_REG_TYPE3__SDMA_RAS_STATUS__SHIFT 0x7
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_STATUS__SHIFT 0x8
+#define SDMA_PUB_REG_TYPE3__SDMA_POWER_CNTL__SHIFT 0xb
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_CTRL__SHIFT 0xc
+#define SDMA_PUB_REG_TYPE3__SDMA_CNTL__SHIFT 0xd
+#define SDMA_PUB_REG_TYPE3__SDMA_CHICKEN_BITS__SHIFT 0xe
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG__SHIFT 0xf
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG_READ__SHIFT 0x10
+#define SDMA_PUB_REG_TYPE3__RESERVED__SHIFT 0x13
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA_PUB_REG_TYPE3__SDMA_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA_PUB_REG_TYPE3__SDMA_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L
+#define SDMA_PUB_REG_TYPE3__SDMA_STATUS4_REG_MASK 0x00000008L
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_DATA_MASK 0x00000010L
+#define SDMA_PUB_REG_TYPE3__SDMA_SCRATCH_RAM_ADDR_MASK 0x00000020L
+#define SDMA_PUB_REG_TYPE3__SDMA_CE_CTRL_MASK 0x00000040L
+#define SDMA_PUB_REG_TYPE3__SDMA_RAS_STATUS_MASK 0x00000080L
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_STATUS_MASK 0x00000100L
+#define SDMA_PUB_REG_TYPE3__SDMA_POWER_CNTL_MASK 0x00000800L
+#define SDMA_PUB_REG_TYPE3__SDMA_CLK_CTRL_MASK 0x00001000L
+#define SDMA_PUB_REG_TYPE3__SDMA_CNTL_MASK 0x00002000L
+#define SDMA_PUB_REG_TYPE3__SDMA_CHICKEN_BITS_MASK 0x00004000L
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG_MASK 0x00008000L
+#define SDMA_PUB_REG_TYPE3__SDMA_GB_ADDR_CONFIG_READ_MASK 0x00010000L
+#define SDMA_PUB_REG_TYPE3__RESERVED_MASK 0xFFF80000L
+//SDMA_CONTEXT_GROUP_BOUNDARY
+#define SDMA_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0
+#define SDMA_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL
+//SDMA_RB_RPTR_FETCH_HI
+#define SDMA_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA_RB_RPTR_FETCH
+#define SDMA_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA_IB_OFFSET_FETCH
+#define SDMA_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA_PROGRAM
+#define SDMA_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA_STATUS_REG
+#define SDMA_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb
+#define SDMA_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define SDMA_STATUS_REG__DRM_MASK_FULL__SHIFT 0x18
+#define SDMA_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L
+#define SDMA_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA_STATUS_REG__DRM_IDLE_MASK 0x00800000L
+#define SDMA_STATUS_REG__DRM_MASK_FULL_MASK 0x01000000L
+#define SDMA_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA_STATUS1_REG
+#define SDMA_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define SDMA_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define SDMA_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA_STATUS1_REG__CE_DRM_FULL__SHIFT 0xb
+#define SDMA_STATUS1_REG__CE_DRM1_FULL__SHIFT 0xc
+#define SDMA_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd
+#define SDMA_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe
+#define SDMA_STATUS1_REG__EX_START__SHIFT 0xf
+#define SDMA_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0x10
+#define SDMA_STATUS1_REG__CE_RD_STALL__SHIFT 0x11
+#define SDMA_STATUS1_REG__CE_WR_STALL__SHIFT 0x12
+#define SDMA_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA_STATUS1_REG__CE_DRM_IDLE_MASK 0x00000080L
+#define SDMA_STATUS1_REG__CE_DRM1_IDLE_MASK 0x00000100L
+#define SDMA_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA_STATUS1_REG__CE_DRM_FULL_MASK 0x00000800L
+#define SDMA_STATUS1_REG__CE_DRM1_FULL_MASK 0x00001000L
+#define SDMA_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L
+#define SDMA_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L
+#define SDMA_STATUS1_REG__EX_START_MASK 0x00008000L
+#define SDMA_STATUS1_REG__DRM_CTX_RESTORE_MASK 0x00010000L
+#define SDMA_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L
+#define SDMA_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L
+//SDMA_RD_BURST_CNTL
+#define SDMA_RD_BURST_CNTL__RD_BURST__SHIFT 0x0
+#define SDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2
+#define SDMA_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L
+#define SDMA_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL
+//SDMA_HBM_PAGE_CONFIG
+#define SDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA_UCODE_CHECKSUM
+#define SDMA_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA_FREEZE
+#define SDMA_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA_FREEZE__F32_FREEZE_MASK 0x00000040L
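+/*
+ * Usage sketch for the SHIFT/MASK pairs above: a field is read by
+ * masking then shifting, and written by the inverse.  The helper
+ * names below are illustrative only, not part of the register
+ * specification; amdgpu wraps this same pattern in its
+ * REG_GET_FIELD()/REG_SET_FIELD() macros.
+ */
+static inline unsigned int sdma_freeze_frozen(unsigned int reg)
+{
+	/* 1 once the engine has actually frozen */
+	return (reg & SDMA_FREEZE__FROZEN_MASK) >> SDMA_FREEZE__FROZEN__SHIFT;
+}
+
+static inline unsigned int sdma_freeze_request(unsigned int reg)
+{
+	/* set the FREEZE request bit without disturbing other fields */
+	return (reg & ~SDMA_FREEZE__FREEZE_MASK) |
+	       (1u << SDMA_FREEZE__FREEZE__SHIFT);
+}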
+//SDMA_PHASE0_QUANTUM
+#define SDMA_PHASE0_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA_PHASE0_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA_PHASE0_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA_PHASE0_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_PHASE1_QUANTUM
+#define SDMA_PHASE1_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA_PHASE1_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA_PHASE1_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA_PHASE1_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_POWER_GATING
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_CONDITION__SHIFT 0x0
+#define SDMA_POWER_GATING__SDMA_POWER_ON_CONDITION__SHIFT 0x1
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_REQ__SHIFT 0x2
+#define SDMA_POWER_GATING__SDMA_POWER_ON_REQ__SHIFT 0x3
+#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_CONDITION_MASK 0x00000001L
+#define SDMA_POWER_GATING__SDMA_POWER_ON_CONDITION_MASK 0x00000002L
+#define SDMA_POWER_GATING__SDMA_POWER_OFF_REQ_MASK 0x00000004L
+#define SDMA_POWER_GATING__SDMA_POWER_ON_REQ_MASK 0x00000008L
+#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L
+//SDMA_PGFSM_CONFIG
+#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0
+#define SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8
+#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9
+#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa
+#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb
+#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc
+#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd
+#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b
+#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c
+#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL
+#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L
+#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L
+#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L
+#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L
+#define SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L
+#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L
+#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L
+#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L
+//SDMA_PGFSM_WRITE
+#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0
+#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PGFSM_READ
+#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0
+#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL
+//CC_SDMA_EDC_CONFIG
+#define CC_SDMA_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_SDMA_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define CC_SDMA_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_SDMA_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//SDMA_BA_THRESHOLD
+#define SDMA_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA_ID
+#define SDMA_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA_VERSION
+#define SDMA_VERSION__MINVER__SHIFT 0x0
+#define SDMA_VERSION__MAJVER__SHIFT 0x8
+#define SDMA_VERSION__REV__SHIFT 0x10
+#define SDMA_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA_VERSION__REV_MASK 0x003F0000L
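+/*
+ * Example: decoding the read-only SDMA_VERSION register with the
+ * masks above.  A minimal sketch; the function name is hypothetical.
+ */
+static inline void sdma_decode_version(unsigned int v, unsigned int *maj,
+					unsigned int *min, unsigned int *rev)
+{
+	*maj = (v & SDMA_VERSION__MAJVER_MASK) >> SDMA_VERSION__MAJVER__SHIFT;
+	*min = (v & SDMA_VERSION__MINVER_MASK) >> SDMA_VERSION__MINVER__SHIFT;
+	*rev = (v & SDMA_VERSION__REV_MASK) >> SDMA_VERSION__REV__SHIFT;
+}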
+//SDMA_EDC_COUNTER
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x0
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x2
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x4
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0x6
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0x8
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xa
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xc
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0x10
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x12
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x14
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x16
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x18
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x1a
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x1c
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x1e
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000003L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x0000000CL
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000030L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x000000C0L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000300L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00000C00L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00003000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x0000C000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00030000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x000C0000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00300000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00C00000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x03000000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x0C000000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x30000000L
+#define SDMA_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0xC0000000L
+//SDMA_EDC_COUNTER2
+#define SDMA_EDC_COUNTER2__SDMA_UCODE_BUF_SED__SHIFT 0x0
+#define SDMA_EDC_COUNTER2__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA_EDC_COUNTER2__SDMA_IB_CMD_BUF_SED__SHIFT 0x4
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x6
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x8
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_WR_FIFO_SED__SHIFT 0xa
+#define SDMA_EDC_COUNTER2__SDMA_DATA_LUT_FIFO_SED__SHIFT 0xc
+#define SDMA_EDC_COUNTER2__SDMA_SPLIT_DATA_BUF_SED__SHIFT 0xe
+#define SDMA_EDC_COUNTER2__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA_EDC_COUNTER2__SDMA_MC_RDRET_BUF_SED__SHIFT 0x12
+#define SDMA_EDC_COUNTER2__SDMA_UCODE_BUF_SED_MASK 0x00000003L
+#define SDMA_EDC_COUNTER2__SDMA_RB_CMD_BUF_SED_MASK 0x0000000CL
+#define SDMA_EDC_COUNTER2__SDMA_IB_CMD_BUF_SED_MASK 0x00000030L
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RD_FIFO_SED_MASK 0x000000C0L
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000300L
+#define SDMA_EDC_COUNTER2__SDMA_UTCL1_WR_FIFO_SED_MASK 0x00000C00L
+#define SDMA_EDC_COUNTER2__SDMA_DATA_LUT_FIFO_SED_MASK 0x00003000L
+#define SDMA_EDC_COUNTER2__SDMA_SPLIT_DATA_BUF_SED_MASK 0x0000C000L
+#define SDMA_EDC_COUNTER2__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00030000L
+#define SDMA_EDC_COUNTER2__SDMA_MC_RDRET_BUF_SED_MASK 0x000C0000L
+//SDMA_STATUS2_REG
+#define SDMA_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3
+#define SDMA_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA_STATUS2_REG__ID_MASK 0x00000007L
+#define SDMA_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L
+#define SDMA_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA_ATOMIC_CNTL
+#define SDMA_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA_ATOMIC_PREOP_LO
+#define SDMA_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA_ATOMIC_PREOP_HI
+#define SDMA_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_CNTL
+#define SDMA_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0
+#define SDMA_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1
+#define SDMA_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb
+#define SDMA_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe
+#define SDMA_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d
+#define SDMA_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L
+#define SDMA_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL
+#define SDMA_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L
+#define SDMA_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L
+#define SDMA_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L
+#define SDMA_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L
+//SDMA_UTCL1_WATERMK
+#define SDMA_UTCL1_WATERMK__REQ_WATERMK__SHIFT 0x0
+#define SDMA_UTCL1_WATERMK__REQ_DEPTH__SHIFT 0x3
+#define SDMA_UTCL1_WATERMK__PAGE_WATERMK__SHIFT 0x5
+#define SDMA_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x8
+#define SDMA_UTCL1_WATERMK__RESERVED__SHIFT 0x10
+#define SDMA_UTCL1_WATERMK__REQ_WATERMK_MASK 0x00000007L
+#define SDMA_UTCL1_WATERMK__REQ_DEPTH_MASK 0x00000018L
+#define SDMA_UTCL1_WATERMK__PAGE_WATERMK_MASK 0x000000E0L
+#define SDMA_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x0000FF00L
+#define SDMA_UTCL1_WATERMK__RESERVED_MASK 0xFFFF0000L
+//SDMA_UTCL1_RD_STATUS
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11
+#define SDMA_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15
+#define SDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16
+#define SDMA_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a
+#define SDMA_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d
+#define SDMA_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e
+#define SDMA_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L
+#define SDMA_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L
+#define SDMA_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L
+#define SDMA_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L
+#define SDMA_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L
+#define SDMA_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L
+#define SDMA_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L
+#define SDMA_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L
+#define SDMA_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L
+//SDMA_UTCL1_WR_STATUS
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_EMPTY__SHIFT 0x7
+#define SDMA_UTCL1_WR_STATUS__RESERVED_8__SHIFT 0x8
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_FULL__SHIFT 0x10
+#define SDMA_UTCL1_WR_STATUS__RESERVED_17__SHIFT 0x11
+#define SDMA_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12
+#define SDMA_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13
+#define SDMA_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14
+#define SDMA_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15
+#define SDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16
+#define SDMA_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_EMPTY_MASK 0x00000080L
+#define SDMA_UTCL1_WR_STATUS__RESERVED_8_MASK 0x00000100L
+#define SDMA_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L
+#define SDMA_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L
+#define SDMA_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L
+#define SDMA_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L
+#define SDMA_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L
+#define SDMA_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L
+#define SDMA_UTCL1_WR_STATUS__REDO_ARR_FULL_MASK 0x00010000L
+#define SDMA_UTCL1_WR_STATUS__RESERVED_17_MASK 0x00020000L
+#define SDMA_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L
+#define SDMA_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L
+#define SDMA_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L
+#define SDMA_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L
+#define SDMA_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L
+#define SDMA_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L
+#define SDMA_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L
+#define SDMA_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L
+//SDMA_UTCL1_INV0
+#define SDMA_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0
+#define SDMA_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1
+#define SDMA_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2
+#define SDMA_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3
+#define SDMA_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4
+#define SDMA_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5
+#define SDMA_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6
+#define SDMA_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7
+#define SDMA_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8
+#define SDMA_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9
+#define SDMA_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa
+#define SDMA_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb
+#define SDMA_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc
+#define SDMA_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c
+#define SDMA_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L
+#define SDMA_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L
+#define SDMA_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L
+#define SDMA_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L
+#define SDMA_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L
+#define SDMA_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L
+#define SDMA_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L
+#define SDMA_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L
+#define SDMA_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L
+#define SDMA_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L
+#define SDMA_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L
+#define SDMA_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L
+#define SDMA_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L
+#define SDMA_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L
+//SDMA_UTCL1_INV1
+#define SDMA_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_INV2
+#define SDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0
+#define SDMA_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_RD_XNACK0
+#define SDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_RD_XNACK1
+#define SDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA_UTCL1_WR_XNACK0
+#define SDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0
+#define SDMA_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA_UTCL1_WR_XNACK1
+#define SDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8
+#define SDMA_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a
+#define SDMA_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L
+#define SDMA_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L
+#define SDMA_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L
+//SDMA_UTCL1_TIMEOUT
+#define SDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0
+#define SDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10
+#define SDMA_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL
+#define SDMA_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L
+//SDMA_UTCL1_PAGE
+#define SDMA_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define SDMA_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9
+#define SDMA_UTCL1_PAGE__LLC_NOALLOC__SHIFT 0xa
+#define SDMA_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define SDMA_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L
+#define SDMA_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L
+#define SDMA_UTCL1_PAGE__LLC_NOALLOC_MASK 0x00000400L
+//SDMA_POWER_CNTL_IDLE
+#define SDMA_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0
+#define SDMA_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10
+#define SDMA_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18
+#define SDMA_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL
+#define SDMA_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L
+#define SDMA_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L
+//SDMA_RELAX_ORDERING_LUT
+#define SDMA_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA_CHICKEN_BITS_2
+#define SDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define SDMA_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define SDMA_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+//SDMA_STATUS3_REG
+#define SDMA_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15
+#define SDMA_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16
+#define SDMA_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L
+#define SDMA_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L
+//SDMA_PHYSICAL_ADDR_LO
+#define SDMA_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA_PHYSICAL_ADDR_HI
+#define SDMA_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA_PHASE2_QUANTUM
+#define SDMA_PHASE2_QUANTUM__UNIT__SHIFT 0x0
+#define SDMA_PHASE2_QUANTUM__VALUE__SHIFT 0x8
+#define SDMA_PHASE2_QUANTUM__PREFER__SHIFT 0x1e
+#define SDMA_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL
+#define SDMA_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L
+#define SDMA_PHASE2_QUANTUM__PREFER_MASK 0x40000000L
+//SDMA_ERROR_LOG
+#define SDMA_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA_PUB_DUMMY_REG0
+#define SDMA_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PUB_DUMMY_REG1
+#define SDMA_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PUB_DUMMY_REG2
+#define SDMA_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PUB_DUMMY_REG3
+#define SDMA_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA_F32_COUNTER
+#define SDMA_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA_PERFCNT_PERFCOUNTER0_CFG
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define SDMA_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//SDMA_PERFCNT_PERFCOUNTER1_CFG
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define SDMA_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define SDMA_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SDMA_PERFCNT_MISC_CNTL
+#define SDMA_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define SDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT__SHIFT 0x10
+#define SDMA_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+#define SDMA_PERFCNT_MISC_CNTL__MMHUB_REQ_EVENT_SELECT_MASK 0x00010000L
+//SDMA_PERFCNT_PERFCOUNTER_LO
+#define SDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA_PERFCNT_PERFCOUNTER_HI
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define SDMA_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
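+/*
+ * Sketch: the two result registers above split a perf counter across
+ * a 32-bit LO word and the 16-bit COUNTER_HI field (the upper half of
+ * the HI register holds COMPARE_VALUE instead).  Reassembling the
+ * counter as a 48-bit value, assuming the caller supplies the raw
+ * register reads; the helper name is hypothetical.
+ */
+static inline unsigned long long sdma_perfcnt_value(unsigned int lo,
+						    unsigned int hi)
+{
+	unsigned long long v;
+
+	v = (hi & SDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK) >>
+	    SDMA_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT;
+	return (v << 32) | lo;
+}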
+//SDMA_CRD_CNTL
+#define SDMA_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define SDMA_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA_CRD_CNTL__DRM_CREDIT_MASK 0x0000007FL
+#define SDMA_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+//SDMA_GPU_IOV_VIOLATION_LOG
+#define SDMA_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA_ULV_CNTL
+#define SDMA_ULV_CNTL__HYSTERESIS__SHIFT 0x0
+#define SDMA_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b
+#define SDMA_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c
+#define SDMA_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d
+#define SDMA_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e
+#define SDMA_ULV_CNTL__ULV_STATUS__SHIFT 0x1f
+#define SDMA_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL
+#define SDMA_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L
+#define SDMA_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L
+#define SDMA_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L
+#define SDMA_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L
+#define SDMA_ULV_CNTL__ULV_STATUS_MASK 0x80000000L
+//SDMA_EA_DBIT_ADDR_DATA
+#define SDMA_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA_EA_DBIT_ADDR_INDEX
+#define SDMA_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA_GPU_IOV_VIOLATION_LOG2
+#define SDMA_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//SDMA_STATUS4_REG
+#define SDMA_STATUS4_REG__IDLE__SHIFT 0x0
+#define SDMA_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define SDMA_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define SDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING__SHIFT 0x4
+#define SDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING__SHIFT 0x5
+#define SDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x6
+#define SDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x7
+#define SDMA_STATUS4_REG__REG_POLLING__SHIFT 0x8
+#define SDMA_STATUS4_REG__MEM_POLLING__SHIFT 0x9
+#define SDMA_STATUS4_REG__UTCL2_RD_XNACK__SHIFT 0xa
+#define SDMA_STATUS4_REG__UTCL2_WR_XNACK__SHIFT 0xc
+#define SDMA_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0xe
+#define SDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x12
+#define SDMA_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD__SHIFT 0x13
+#define SDMA_STATUS4_REG__VM_HOLE_STATUS__SHIFT 0x14
+#define SDMA_STATUS4_REG__IDLE_MASK 0x00000001L
+#define SDMA_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define SDMA_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define SDMA_STATUS4_REG__MMHUB_RD_OUTSTANDING_MASK 0x00000010L
+#define SDMA_STATUS4_REG__MMHUB_WR_OUTSTANDING_MASK 0x00000020L
+#define SDMA_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000040L
+#define SDMA_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000080L
+#define SDMA_STATUS4_REG__REG_POLLING_MASK 0x00000100L
+#define SDMA_STATUS4_REG__MEM_POLLING_MASK 0x00000200L
+#define SDMA_STATUS4_REG__UTCL2_RD_XNACK_MASK 0x00000C00L
+#define SDMA_STATUS4_REG__UTCL2_WR_XNACK_MASK 0x00003000L
+#define SDMA_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x0003C000L
+#define SDMA_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00040000L
+#define SDMA_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD_MASK 0x00080000L
+#define SDMA_STATUS4_REG__VM_HOLE_STATUS_MASK 0x00100000L
+//SDMA_SCRATCH_RAM_DATA
+#define SDMA_SCRATCH_RAM_DATA__DATA__SHIFT 0x0
+#define SDMA_SCRATCH_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//SDMA_SCRATCH_RAM_ADDR
+#define SDMA_SCRATCH_RAM_ADDR__ADDR__SHIFT 0x0
+#define SDMA_SCRATCH_RAM_ADDR__ADDR_MASK 0x0000007FL
+//SDMA_CE_CTRL
+#define SDMA_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define SDMA_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define SDMA_CE_CTRL__WR_AFIFO_WATERMARK__SHIFT 0x5
+#define SDMA_CE_CTRL__RESERVED__SHIFT 0x8
+#define SDMA_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define SDMA_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define SDMA_CE_CTRL__WR_AFIFO_WATERMARK_MASK 0x000000E0L
+#define SDMA_CE_CTRL__RESERVED_MASK 0xFFFFFF00L
+//SDMA_RAS_STATUS
+#define SDMA_RAS_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define SDMA_RAS_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define SDMA_RAS_STATUS__F32_DATA_ECC__SHIFT 0x2
+#define SDMA_RAS_STATUS__WPTR_ATOMIC_ECC__SHIFT 0x3
+#define SDMA_RAS_STATUS__COPY_DATA_ECC__SHIFT 0x4
+#define SDMA_RAS_STATUS__SRAM_ECC__SHIFT 0x5
+#define SDMA_RAS_STATUS__RB_FETCH_NACK_GEN_ERR__SHIFT 0x8
+#define SDMA_RAS_STATUS__IB_FETCH_NACK_GEN_ERR__SHIFT 0x9
+#define SDMA_RAS_STATUS__F32_DATA_NACK_GEN_ERR__SHIFT 0xa
+#define SDMA_RAS_STATUS__COPY_DATA_NACK_GEN_ERR__SHIFT 0xb
+#define SDMA_RAS_STATUS__WRRET_DATA_NACK_GEN_ERR__SHIFT 0xc
+#define SDMA_RAS_STATUS__WPTR_RPTR_ATOMIC_NACK_GEN_ERR__SHIFT 0xd
+#define SDMA_RAS_STATUS__ECC_PWRMGT_INT_BUSY__SHIFT 0xe
+#define SDMA_RAS_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define SDMA_RAS_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define SDMA_RAS_STATUS__F32_DATA_ECC_MASK 0x00000004L
+#define SDMA_RAS_STATUS__WPTR_ATOMIC_ECC_MASK 0x00000008L
+#define SDMA_RAS_STATUS__COPY_DATA_ECC_MASK 0x00000010L
+#define SDMA_RAS_STATUS__SRAM_ECC_MASK 0x00000020L
+#define SDMA_RAS_STATUS__RB_FETCH_NACK_GEN_ERR_MASK 0x00000100L
+#define SDMA_RAS_STATUS__IB_FETCH_NACK_GEN_ERR_MASK 0x00000200L
+#define SDMA_RAS_STATUS__F32_DATA_NACK_GEN_ERR_MASK 0x00000400L
+#define SDMA_RAS_STATUS__COPY_DATA_NACK_GEN_ERR_MASK 0x00000800L
+#define SDMA_RAS_STATUS__WRRET_DATA_NACK_GEN_ERR_MASK 0x00001000L
+#define SDMA_RAS_STATUS__WPTR_RPTR_ATOMIC_NACK_GEN_ERR_MASK 0x00002000L
+#define SDMA_RAS_STATUS__ECC_PWRMGT_INT_BUSY_MASK 0x00004000L
+//SDMA_CLK_STATUS
+#define SDMA_CLK_STATUS__DYN_CLK__SHIFT 0x0
+#define SDMA_CLK_STATUS__PTR_CLK__SHIFT 0x1
+#define SDMA_CLK_STATUS__REG_CLK__SHIFT 0x2
+#define SDMA_CLK_STATUS__F32_CLK__SHIFT 0x3
+#define SDMA_CLK_STATUS__CE_CLK__SHIFT 0x4
+#define SDMA_CLK_STATUS__PERF_CLK__SHIFT 0x5
+#define SDMA_CLK_STATUS__DYN_CLK_MASK 0x00000001L
+#define SDMA_CLK_STATUS__PTR_CLK_MASK 0x00000002L
+#define SDMA_CLK_STATUS__REG_CLK_MASK 0x00000004L
+#define SDMA_CLK_STATUS__F32_CLK_MASK 0x00000008L
+#define SDMA_CLK_STATUS__CE_CLK_MASK 0x00000010L
+#define SDMA_CLK_STATUS__PERF_CLK_MASK 0x00000020L
+//SDMA_POWER_CNTL
+#define SDMA_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0
+#define SDMA_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1
+#define SDMA_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2
+#define SDMA_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME__SHIFT 0x3
+#define SDMA_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8
+#define SDMA_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9
+#define SDMA_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa
+#define SDMA_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb
+#define SDMA_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc
+#define SDMA_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME__SHIFT 0x1a
+#define SDMA_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L
+#define SDMA_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L
+#define SDMA_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L
+#define SDMA_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
+#define SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define SDMA_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L
+#define SDMA_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L
+#define SDMA_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L
+#define SDMA_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L
+#define SDMA_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
+//SDMA_CLK_CTRL
+#define SDMA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SDMA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SDMA_CLK_CTRL__RESERVED__SHIFT 0xc
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define SDMA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SDMA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SDMA_CLK_CTRL__RESERVED_MASK 0x00FFF000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//SDMA_CNTL
+#define SDMA_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA_CNTL__UTC_L1_ENABLE__SHIFT 0x1
+#define SDMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x6
+#define SDMA_CNTL__REG_WRITE_PROTECT_INT_ENABLE__SHIFT 0x7
+#define SDMA_CNTL__INVALID_DOORBELL_INT_ENABLE__SHIFT 0x8
+#define SDMA_CNTL__VM_HOLE_INT_ENABLE__SHIFT 0x9
+#define SDMA_CNTL__DRAM_ECC_INT_ENABLE__SHIFT 0xa
+#define SDMA_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE__SHIFT 0xb
+#define SDMA_CNTL__PAGE_NULL_INT_ENABLE__SHIFT 0xc
+#define SDMA_CNTL__PAGE_FAULT_INT_ENABLE__SHIFT 0xd
+#define SDMA_CNTL__NACK_GEN_ERR_INT_ENABLE__SHIFT 0xe
+#define SDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12
+#define SDMA_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define SDMA_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA_CNTL__RB_PREEMPT_INT_ENABLE__SHIFT 0x1f
+#define SDMA_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA_CNTL__UTC_L1_ENABLE_MASK 0x00000002L
+#define SDMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000040L
+#define SDMA_CNTL__REG_WRITE_PROTECT_INT_ENABLE_MASK 0x00000080L
+#define SDMA_CNTL__INVALID_DOORBELL_INT_ENABLE_MASK 0x00000100L
+#define SDMA_CNTL__VM_HOLE_INT_ENABLE_MASK 0x00000200L
+#define SDMA_CNTL__DRAM_ECC_INT_ENABLE_MASK 0x00000400L
+#define SDMA_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE_MASK 0x00000800L
+#define SDMA_CNTL__PAGE_NULL_INT_ENABLE_MASK 0x00001000L
+#define SDMA_CNTL__PAGE_FAULT_INT_ENABLE_MASK 0x00002000L
+#define SDMA_CNTL__NACK_GEN_ERR_INT_ENABLE_MASK 0x00004000L
+#define SDMA_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L
+#define SDMA_CNTL__DRM_RESTORE_ENABLE_MASK 0x00080000L
+#define SDMA_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+#define SDMA_CNTL__RB_PREEMPT_INT_ENABLE_MASK 0x80000000L
+//SDMA_CHICKEN_BITS
+#define SDMA_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0
+#define SDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA_CHICKEN_BITS__F32_MGCG_ENABLE__SHIFT 0x3
+#define SDMA_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8
+#define SDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa
+#define SDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14
+#define SDMA_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17
+#define SDMA_CHICKEN_BITS__SRAM_FGCG_ENABLE__SHIFT 0x1a
+#define SDMA_CHICKEN_BITS__RESERVED__SHIFT 0x1b
+#define SDMA_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L
+#define SDMA_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA_CHICKEN_BITS__F32_MGCG_ENABLE_MASK 0x00000008L
+#define SDMA_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L
+#define SDMA_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L
+#define SDMA_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L
+#define SDMA_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L
+#define SDMA_CHICKEN_BITS__SRAM_FGCG_ENABLE_MASK 0x04000000L
+#define SDMA_CHICKEN_BITS__RESERVED_MASK 0xF8000000L
+//SDMA_GB_ADDR_CONFIG
+#define SDMA_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define SDMA_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define SDMA_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA_GB_ADDR_CONFIG_READ
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L
+#define SDMA_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+//SDMA_GFX_RB_CNTL
+#define SDMA_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_GFX_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L
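+/*
+ * Sketch: composing an SDMA_GFX_RB_CNTL value from the fields above.
+ * The driver programs RB_SIZE as log2 of the ring size in dwords;
+ * the helper name and the specific bits enabled here are assumptions
+ * for illustration, not the driver's exact bring-up sequence.
+ */
+static inline unsigned int sdma_gfx_rb_cntl(unsigned int rb_size_log2)
+{
+	unsigned int v = 0;
+
+	v |= (rb_size_log2 << SDMA_GFX_RB_CNTL__RB_SIZE__SHIFT) &
+	     SDMA_GFX_RB_CNTL__RB_SIZE_MASK;
+	/* have the engine write the read pointer back to memory */
+	v |= SDMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
+	/* enable the ring last, once base/rptr/wptr are programmed */
+	v |= SDMA_GFX_RB_CNTL__RB_ENABLE_MASK;
+	return v;
+}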
+//SDMA_GFX_RB_BASE
+#define SDMA_GFX_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_BASE_HI
+#define SDMA_GFX_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_GFX_RB_RPTR
+#define SDMA_GFX_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_RPTR_HI
+#define SDMA_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR
+#define SDMA_GFX_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_HI
+#define SDMA_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_POLL_CNTL
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_GFX_RB_RPTR_ADDR_HI
+#define SDMA_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_RPTR_ADDR_LO
+#define SDMA_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_GFX_IB_CNTL
+#define SDMA_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_GFX_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_GFX_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_GFX_IB_RPTR
+#define SDMA_GFX_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_GFX_IB_OFFSET
+#define SDMA_GFX_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_GFX_IB_BASE_LO
+#define SDMA_GFX_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_GFX_IB_BASE_HI
+#define SDMA_GFX_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_IB_SIZE
+#define SDMA_GFX_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_GFX_SKIP_CNTL
+#define SDMA_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_GFX_CONTEXT_STATUS
+#define SDMA_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_GFX_DOORBELL
+#define SDMA_GFX_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_GFX_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_GFX_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_GFX_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_GFX_CONTEXT_CNTL
+#define SDMA_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10
+#define SDMA_GFX_CONTEXT_CNTL__SESSION_SEL__SHIFT 0x18
+#define SDMA_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L
+#define SDMA_GFX_CONTEXT_CNTL__SESSION_SEL_MASK 0x0F000000L
+//SDMA_GFX_STATUS
+#define SDMA_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_GFX_DOORBELL_LOG
+#define SDMA_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_GFX_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_GFX_WATERMARK
+#define SDMA_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_GFX_DOORBELL_OFFSET
+#define SDMA_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_GFX_CSA_ADDR_LO
+#define SDMA_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_GFX_CSA_ADDR_HI
+#define SDMA_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_IB_SUB_REMAIN
+#define SDMA_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_GFX_PREEMPT
+#define SDMA_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_GFX_DUMMY_REG
+#define SDMA_GFX_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_POLL_ADDR_HI
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_GFX_RB_WPTR_POLL_ADDR_LO
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_GFX_RB_AQL_CNTL
+#define SDMA_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_GFX_MINOR_PTR_UPDATE
+#define SDMA_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_GFX_MIDCMD_DATA0
+#define SDMA_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA1
+#define SDMA_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA2
+#define SDMA_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA3
+#define SDMA_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA4
+#define SDMA_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA5
+#define SDMA_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA6
+#define SDMA_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA7
+#define SDMA_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA8
+#define SDMA_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA9
+#define SDMA_GFX_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_DATA10
+#define SDMA_GFX_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_GFX_MIDCMD_CNTL
+#define SDMA_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_PAGE_RB_CNTL
+#define SDMA_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_PAGE_RB_BASE
+#define SDMA_PAGE_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_BASE_HI
+#define SDMA_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_PAGE_RB_RPTR
+#define SDMA_PAGE_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_RPTR_HI
+#define SDMA_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR
+#define SDMA_PAGE_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_HI
+#define SDMA_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_POLL_CNTL
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_PAGE_RB_RPTR_ADDR_HI
+#define SDMA_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_RPTR_ADDR_LO
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_PAGE_IB_CNTL
+#define SDMA_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_PAGE_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_PAGE_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_PAGE_IB_RPTR
+#define SDMA_PAGE_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_PAGE_IB_OFFSET
+#define SDMA_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_PAGE_IB_BASE_LO
+#define SDMA_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_PAGE_IB_BASE_HI
+#define SDMA_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_IB_SIZE
+#define SDMA_PAGE_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_PAGE_SKIP_CNTL
+#define SDMA_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_PAGE_CONTEXT_STATUS
+#define SDMA_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_PAGE_DOORBELL
+#define SDMA_PAGE_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_PAGE_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_PAGE_STATUS
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_PAGE_DOORBELL_LOG
+#define SDMA_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_PAGE_WATERMARK
+#define SDMA_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_PAGE_DOORBELL_OFFSET
+#define SDMA_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_PAGE_CSA_ADDR_LO
+#define SDMA_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_PAGE_CSA_ADDR_HI
+#define SDMA_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_IB_SUB_REMAIN
+#define SDMA_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_PAGE_PREEMPT
+#define SDMA_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_PAGE_DUMMY_REG
+#define SDMA_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_POLL_ADDR_HI
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_PAGE_RB_WPTR_POLL_ADDR_LO
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_PAGE_RB_AQL_CNTL
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_PAGE_MINOR_PTR_UPDATE
+#define SDMA_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_PAGE_MIDCMD_DATA0
+#define SDMA_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA1
+#define SDMA_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA2
+#define SDMA_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA3
+#define SDMA_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA4
+#define SDMA_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA5
+#define SDMA_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA6
+#define SDMA_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA7
+#define SDMA_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA8
+#define SDMA_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA9
+#define SDMA_PAGE_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_DATA10
+#define SDMA_PAGE_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_PAGE_MIDCMD_CNTL
+#define SDMA_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC0_RB_CNTL
+#define SDMA_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC0_RB_BASE
+#define SDMA_RLC0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_BASE_HI
+#define SDMA_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC0_RB_RPTR
+#define SDMA_RLC0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_RPTR_HI
+#define SDMA_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR
+#define SDMA_RLC0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_HI
+#define SDMA_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_POLL_CNTL
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC0_RB_RPTR_ADDR_HI
+#define SDMA_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_RPTR_ADDR_LO
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC0_IB_CNTL
+#define SDMA_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC0_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC0_IB_RPTR
+#define SDMA_RLC0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC0_IB_OFFSET
+#define SDMA_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC0_IB_BASE_LO
+#define SDMA_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC0_IB_BASE_HI
+#define SDMA_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_IB_SIZE
+#define SDMA_RLC0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC0_SKIP_CNTL
+#define SDMA_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC0_CONTEXT_STATUS
+#define SDMA_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC0_DOORBELL
+#define SDMA_RLC0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC0_STATUS
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC0_DOORBELL_LOG
+#define SDMA_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC0_WATERMARK
+#define SDMA_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC0_DOORBELL_OFFSET
+#define SDMA_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC0_CSA_ADDR_LO
+#define SDMA_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC0_CSA_ADDR_HI
+#define SDMA_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_IB_SUB_REMAIN
+#define SDMA_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC0_PREEMPT
+#define SDMA_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC0_DUMMY_REG
+#define SDMA_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC0_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC0_RB_AQL_CNTL
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC0_MINOR_PTR_UPDATE
+#define SDMA_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC0_MIDCMD_DATA0
+#define SDMA_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA1
+#define SDMA_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA2
+#define SDMA_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA3
+#define SDMA_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA4
+#define SDMA_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA5
+#define SDMA_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA6
+#define SDMA_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA7
+#define SDMA_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA8
+#define SDMA_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA9
+#define SDMA_RLC0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_DATA10
+#define SDMA_RLC0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC0_MIDCMD_CNTL
+#define SDMA_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC1_RB_CNTL
+#define SDMA_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC1_RB_BASE
+#define SDMA_RLC1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_BASE_HI
+#define SDMA_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC1_RB_RPTR
+#define SDMA_RLC1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_RPTR_HI
+#define SDMA_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR
+#define SDMA_RLC1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_HI
+#define SDMA_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_POLL_CNTL
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC1_RB_RPTR_ADDR_HI
+#define SDMA_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_RPTR_ADDR_LO
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC1_IB_CNTL
+#define SDMA_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC1_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC1_IB_RPTR
+#define SDMA_RLC1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC1_IB_OFFSET
+#define SDMA_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC1_IB_BASE_LO
+#define SDMA_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC1_IB_BASE_HI
+#define SDMA_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_IB_SIZE
+#define SDMA_RLC1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC1_SKIP_CNTL
+#define SDMA_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC1_CONTEXT_STATUS
+#define SDMA_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC1_DOORBELL
+#define SDMA_RLC1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC1_STATUS
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC1_DOORBELL_LOG
+#define SDMA_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC1_WATERMARK
+#define SDMA_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC1_DOORBELL_OFFSET
+#define SDMA_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC1_CSA_ADDR_LO
+#define SDMA_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC1_CSA_ADDR_HI
+#define SDMA_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_IB_SUB_REMAIN
+#define SDMA_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC1_PREEMPT
+#define SDMA_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC1_DUMMY_REG
+#define SDMA_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC1_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC1_RB_AQL_CNTL
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC1_MINOR_PTR_UPDATE
+#define SDMA_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC1_MIDCMD_DATA0
+#define SDMA_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA1
+#define SDMA_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA2
+#define SDMA_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA3
+#define SDMA_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA4
+#define SDMA_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA5
+#define SDMA_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA6
+#define SDMA_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA7
+#define SDMA_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA8
+#define SDMA_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA9
+#define SDMA_RLC1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_DATA10
+#define SDMA_RLC1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC1_MIDCMD_CNTL
+#define SDMA_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC2_RB_CNTL
+#define SDMA_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC2_RB_BASE
+#define SDMA_RLC2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_BASE_HI
+#define SDMA_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC2_RB_RPTR
+#define SDMA_RLC2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_RPTR_HI
+#define SDMA_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR
+#define SDMA_RLC2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_HI
+#define SDMA_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_POLL_CNTL
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC2_RB_RPTR_ADDR_HI
+#define SDMA_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_RPTR_ADDR_LO
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC2_IB_CNTL
+#define SDMA_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC2_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC2_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC2_IB_RPTR
+#define SDMA_RLC2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC2_IB_OFFSET
+#define SDMA_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC2_IB_BASE_LO
+#define SDMA_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC2_IB_BASE_HI
+#define SDMA_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_IB_SIZE
+#define SDMA_RLC2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC2_SKIP_CNTL
+#define SDMA_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC2_CONTEXT_STATUS
+#define SDMA_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC2_DOORBELL
+#define SDMA_RLC2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC2_STATUS
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC2_DOORBELL_LOG
+#define SDMA_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC2_WATERMARK
+#define SDMA_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC2_DOORBELL_OFFSET
+#define SDMA_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC2_CSA_ADDR_LO
+#define SDMA_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC2_CSA_ADDR_HI
+#define SDMA_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_IB_SUB_REMAIN
+#define SDMA_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC2_PREEMPT
+#define SDMA_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC2_DUMMY_REG
+#define SDMA_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC2_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC2_RB_AQL_CNTL
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC2_MINOR_PTR_UPDATE
+#define SDMA_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC2_MIDCMD_DATA0
+#define SDMA_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA1
+#define SDMA_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA2
+#define SDMA_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA3
+#define SDMA_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA4
+#define SDMA_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA5
+#define SDMA_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA6
+#define SDMA_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA7
+#define SDMA_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA8
+#define SDMA_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA9
+#define SDMA_RLC2_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_DATA10
+#define SDMA_RLC2_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC2_MIDCMD_CNTL
+#define SDMA_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC3_RB_CNTL
+#define SDMA_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC3_RB_BASE
+#define SDMA_RLC3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_BASE_HI
+#define SDMA_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC3_RB_RPTR
+#define SDMA_RLC3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_RPTR_HI
+#define SDMA_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR
+#define SDMA_RLC3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_HI
+#define SDMA_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_POLL_CNTL
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC3_RB_RPTR_ADDR_HI
+#define SDMA_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_RPTR_ADDR_LO
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC3_IB_CNTL
+#define SDMA_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC3_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC3_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC3_IB_RPTR
+#define SDMA_RLC3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC3_IB_OFFSET
+#define SDMA_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC3_IB_BASE_LO
+#define SDMA_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC3_IB_BASE_HI
+#define SDMA_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_IB_SIZE
+#define SDMA_RLC3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC3_SKIP_CNTL
+#define SDMA_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC3_CONTEXT_STATUS
+#define SDMA_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC3_DOORBELL
+#define SDMA_RLC3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC3_STATUS
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC3_DOORBELL_LOG
+#define SDMA_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC3_WATERMARK
+#define SDMA_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC3_DOORBELL_OFFSET
+#define SDMA_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC3_CSA_ADDR_LO
+#define SDMA_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC3_CSA_ADDR_HI
+#define SDMA_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_IB_SUB_REMAIN
+#define SDMA_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC3_PREEMPT
+#define SDMA_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC3_DUMMY_REG
+#define SDMA_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC3_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC3_RB_AQL_CNTL
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC3_MINOR_PTR_UPDATE
+#define SDMA_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC3_MIDCMD_DATA0
+#define SDMA_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA1
+#define SDMA_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA2
+#define SDMA_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA3
+#define SDMA_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA4
+#define SDMA_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA5
+#define SDMA_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA6
+#define SDMA_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA7
+#define SDMA_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA8
+#define SDMA_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA9
+#define SDMA_RLC3_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_DATA10
+#define SDMA_RLC3_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC3_MIDCMD_CNTL
+#define SDMA_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC4_RB_CNTL
+#define SDMA_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC4_RB_BASE
+#define SDMA_RLC4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_BASE_HI
+#define SDMA_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC4_RB_RPTR
+#define SDMA_RLC4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_RPTR_HI
+#define SDMA_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR
+#define SDMA_RLC4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_HI
+#define SDMA_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_POLL_CNTL
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC4_RB_RPTR_ADDR_HI
+#define SDMA_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_RPTR_ADDR_LO
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC4_IB_CNTL
+#define SDMA_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC4_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC4_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC4_IB_RPTR
+#define SDMA_RLC4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC4_IB_OFFSET
+#define SDMA_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC4_IB_BASE_LO
+#define SDMA_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC4_IB_BASE_HI
+#define SDMA_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_IB_SIZE
+#define SDMA_RLC4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC4_SKIP_CNTL
+#define SDMA_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC4_CONTEXT_STATUS
+#define SDMA_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC4_DOORBELL
+#define SDMA_RLC4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC4_STATUS
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC4_DOORBELL_LOG
+#define SDMA_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC4_WATERMARK
+#define SDMA_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC4_DOORBELL_OFFSET
+#define SDMA_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC4_CSA_ADDR_LO
+#define SDMA_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC4_CSA_ADDR_HI
+#define SDMA_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_IB_SUB_REMAIN
+#define SDMA_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC4_PREEMPT
+#define SDMA_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC4_DUMMY_REG
+#define SDMA_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC4_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC4_RB_AQL_CNTL
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC4_MINOR_PTR_UPDATE
+#define SDMA_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC4_MIDCMD_DATA0
+#define SDMA_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA1
+#define SDMA_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA2
+#define SDMA_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA3
+#define SDMA_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA4
+#define SDMA_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA5
+#define SDMA_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA6
+#define SDMA_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA7
+#define SDMA_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA8
+#define SDMA_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA9
+#define SDMA_RLC4_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_DATA10
+#define SDMA_RLC4_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC4_MIDCMD_CNTL
+#define SDMA_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC5_RB_CNTL
+#define SDMA_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC5_RB_BASE
+#define SDMA_RLC5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_BASE_HI
+#define SDMA_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC5_RB_RPTR
+#define SDMA_RLC5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_RPTR_HI
+#define SDMA_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR
+#define SDMA_RLC5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_HI
+#define SDMA_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_POLL_CNTL
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC5_RB_RPTR_ADDR_HI
+#define SDMA_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_RPTR_ADDR_LO
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC5_IB_CNTL
+#define SDMA_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC5_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC5_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC5_IB_RPTR
+#define SDMA_RLC5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC5_IB_OFFSET
+#define SDMA_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC5_IB_BASE_LO
+#define SDMA_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC5_IB_BASE_HI
+#define SDMA_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_IB_SIZE
+#define SDMA_RLC5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC5_SKIP_CNTL
+#define SDMA_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC5_CONTEXT_STATUS
+#define SDMA_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC5_DOORBELL
+#define SDMA_RLC5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC5_STATUS
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC5_DOORBELL_LOG
+#define SDMA_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC5_WATERMARK
+#define SDMA_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC5_DOORBELL_OFFSET
+#define SDMA_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC5_CSA_ADDR_LO
+#define SDMA_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC5_CSA_ADDR_HI
+#define SDMA_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_IB_SUB_REMAIN
+#define SDMA_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC5_PREEMPT
+#define SDMA_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC5_DUMMY_REG
+#define SDMA_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC5_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC5_RB_AQL_CNTL
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC5_MINOR_PTR_UPDATE
+#define SDMA_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC5_MIDCMD_DATA0
+#define SDMA_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA1
+#define SDMA_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA2
+#define SDMA_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA3
+#define SDMA_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA4
+#define SDMA_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA5
+#define SDMA_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA6
+#define SDMA_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA7
+#define SDMA_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA8
+#define SDMA_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA9
+#define SDMA_RLC5_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_DATA10
+#define SDMA_RLC5_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC5_MIDCMD_CNTL
+#define SDMA_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC6_RB_CNTL
+#define SDMA_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC6_RB_BASE
+#define SDMA_RLC6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_BASE_HI
+#define SDMA_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC6_RB_RPTR
+#define SDMA_RLC6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_RPTR_HI
+#define SDMA_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR
+#define SDMA_RLC6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_HI
+#define SDMA_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_POLL_CNTL
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC6_RB_RPTR_ADDR_HI
+#define SDMA_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_RPTR_ADDR_LO
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC6_IB_CNTL
+#define SDMA_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC6_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC6_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC6_IB_RPTR
+#define SDMA_RLC6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC6_IB_OFFSET
+#define SDMA_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC6_IB_BASE_LO
+#define SDMA_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC6_IB_BASE_HI
+#define SDMA_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_IB_SIZE
+#define SDMA_RLC6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC6_SKIP_CNTL
+#define SDMA_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC6_CONTEXT_STATUS
+#define SDMA_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC6_DOORBELL
+#define SDMA_RLC6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC6_STATUS
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC6_DOORBELL_LOG
+#define SDMA_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC6_WATERMARK
+#define SDMA_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC6_DOORBELL_OFFSET
+#define SDMA_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC6_CSA_ADDR_LO
+#define SDMA_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC6_CSA_ADDR_HI
+#define SDMA_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_IB_SUB_REMAIN
+#define SDMA_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC6_PREEMPT
+#define SDMA_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC6_DUMMY_REG
+#define SDMA_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC6_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC6_RB_AQL_CNTL
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC6_MINOR_PTR_UPDATE
+#define SDMA_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC6_MIDCMD_DATA0
+#define SDMA_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA1
+#define SDMA_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA2
+#define SDMA_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA3
+#define SDMA_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA4
+#define SDMA_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA5
+#define SDMA_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA6
+#define SDMA_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA7
+#define SDMA_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA8
+#define SDMA_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA9
+#define SDMA_RLC6_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_DATA10
+#define SDMA_RLC6_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC6_MIDCMD_CNTL
+#define SDMA_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA_RLC7_RB_CNTL
+#define SDMA_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA_RLC7_RB_BASE
+#define SDMA_RLC7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_BASE_HI
+#define SDMA_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA_RLC7_RB_RPTR
+#define SDMA_RLC7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_RPTR_HI
+#define SDMA_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR
+#define SDMA_RLC7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_HI
+#define SDMA_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_POLL_CNTL
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L
+#define SDMA_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//SDMA_RLC7_RB_RPTR_ADDR_HI
+#define SDMA_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_RPTR_ADDR_LO
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC7_IB_CNTL
+#define SDMA_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA_RLC7_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA_RLC7_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA_RLC7_IB_RPTR
+#define SDMA_RLC7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC7_IB_OFFSET
+#define SDMA_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA_RLC7_IB_BASE_LO
+#define SDMA_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA_RLC7_IB_BASE_HI
+#define SDMA_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_IB_SIZE
+#define SDMA_RLC7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC7_SKIP_CNTL
+#define SDMA_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA_RLC7_CONTEXT_STATUS
+#define SDMA_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L
+#define SDMA_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+//SDMA_RLC7_DOORBELL
+#define SDMA_RLC7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA_RLC7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA_RLC7_STATUS
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL
+#define SDMA_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L
+//SDMA_RLC7_DOORBELL_LOG
+#define SDMA_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA_RLC7_WATERMARK
+#define SDMA_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0
+#define SDMA_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10
+#define SDMA_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL
+#define SDMA_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L
+//SDMA_RLC7_DOORBELL_OFFSET
+#define SDMA_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA_RLC7_CSA_ADDR_LO
+#define SDMA_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC7_CSA_ADDR_HI
+#define SDMA_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_IB_SUB_REMAIN
+#define SDMA_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL
+//SDMA_RLC7_PREEMPT
+#define SDMA_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA_RLC7_DUMMY_REG
+#define SDMA_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_POLL_ADDR_HI
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA_RLC7_RB_WPTR_POLL_ADDR_LO
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA_RLC7_RB_AQL_CNTL
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+//SDMA_RLC7_MINOR_PTR_UPDATE
+#define SDMA_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA_RLC7_MIDCMD_DATA0
+#define SDMA_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA1
+#define SDMA_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA2
+#define SDMA_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA3
+#define SDMA_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA4
+#define SDMA_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA5
+#define SDMA_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA6
+#define SDMA_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA7
+#define SDMA_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA8
+#define SDMA_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA9
+#define SDMA_RLC7_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_DATA10
+#define SDMA_RLC7_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA_RLC7_MIDCMD_CNTL
+#define SDMA_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
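
These generated SHIFT/MASK pairs are consumed uniformly across amdgpu's
register headers. Below is a minimal sketch of packing and decoding a field
by hand, using the SDMA_RLC4_IB_CNTL defines above; the driver itself
normally goes through its REG_SET_FIELD()/REG_GET_FIELD() helpers, which
expand to the same shift-and-mask arithmetic:

static uint32_t rlc4_ib_cntl_pack(uint32_t vmid)
{
	uint32_t v = 0;

	/* Enable IB fetching and tag commands with the given VMID. */
	v |= (1u << SDMA_RLC4_IB_CNTL__IB_ENABLE__SHIFT) &
	     SDMA_RLC4_IB_CNTL__IB_ENABLE_MASK;
	v |= (vmid << SDMA_RLC4_IB_CNTL__CMD_VMID__SHIFT) &
	     SDMA_RLC4_IB_CNTL__CMD_VMID_MASK;
	return v;
}

static uint32_t rlc4_ib_cntl_vmid(uint32_t v)
{
	return (v & SDMA_RLC4_IB_CNTL__CMD_VMID_MASK) >>
	       SDMA_RLC4_IB_CNTL__CMD_VMID__SHIFT;
}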
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 75f18791cdb9..86b6b0c9fb02 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -160,6 +160,8 @@ enum PP_SMC_POWER_PROFILE {
PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
PP_SMC_POWER_PROFILE_WINDOW3D = 0x7,
+ PP_SMC_POWER_PROFILE_CAPPED = 0x8,
+ PP_SMC_POWER_PROFILE_UNCAPPED = 0x9,
PP_SMC_POWER_PROFILE_COUNT,
};
@@ -331,6 +333,8 @@ struct amd_pm_funcs {
int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value);
int (*read_sensor)(void *handle, int idx, void *value, int *size);
+ int (*get_apu_thermal_limit)(void *handle, uint32_t *limit);
+ int (*set_apu_thermal_limit)(void *handle, uint32_t limit);
enum amd_dpm_forced_level (*get_performance_level)(void *handle);
enum amd_pm_state_type (*get_current_power_state)(void *handle);
int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 6e79d3352d0b..300e156b924f 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -456,6 +456,34 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
return ret;
}
+int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = -EINVAL;
+
+ if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
+int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = -EINVAL;
+
+ if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index bf6d63673b5a..d75a67cfe523 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -91,6 +91,8 @@ const char * const amdgpu_pp_profile_name[] = {
"COMPUTE",
"CUSTOM",
"WINDOW_3D",
+ "CAPPED",
+ "UNCAPPED",
};
/**
@@ -1686,6 +1688,82 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
}
/**
+ * DOC: apu_thermal_cap
+ *
+ * The amdgpu driver provides a sysfs API for retrieving and updating the
+ * thermal limit temperature in millidegrees Celsius
+ *
+ * Reading the file returns the current core limit value
+ *
+ * Writing an integer to the file sets a new thermal limit. The value
+ * must be between 0 and 100; writes outside that range are rejected
+ * with -EINVAL.
+ */
+static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, size;
+ u32 limit;
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
+ if (!ret)
+ size = sysfs_emit(buf, "%u\n", limit);
+ else
+ size = sysfs_emit(buf, "failed to get thermal limit\n");
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ u32 value;
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ ret = kstrtou32(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value > 100) {
+		dev_err(dev, "Invalid argument!\n");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
+	if (ret) {
+		dev_err(dev, "failed to update thermal limit\n");
+		pm_runtime_mark_last_busy(ddev->dev);
+		pm_runtime_put_autosuspend(ddev->dev);
+		return ret;
+	}
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return count;
+}
+
+/**
* DOC: gpu_metrics
*
* The amdgpu driver provides a sysfs API for retrieving current gpu
@@ -1937,6 +2015,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
.attr_update = ss_power_attr_update),
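
The new apu_thermal_cap node is driven from user space like the other
amdgpu_device_attrs entries. A hedged user-space sketch follows; the card
index, and therefore the exact sysfs path, is an assumption:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/apu_thermal_cap";
	unsigned int limit;
	FILE *f;

	/* Read back the current core limit value. */
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%u", &limit) == 1)
		printf("current cap: %u\n", limit);
	fclose(f);

	/* Set a new limit; must be 0..100 or the write fails with -EINVAL. */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "%u\n", 95u);
	fclose(f);
	return 0;
}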
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 16addceca68f..d178f3f44081 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -369,6 +369,9 @@ struct amdgpu_pm {
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
+int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit);
+int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit);
+
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
uint32_t block_type, bool gate);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 0652b001ad54..b5d64749990e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -40,6 +40,7 @@
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
+#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "amd_pcie.h"
@@ -609,6 +610,11 @@ static int smu_set_funcs(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
smu_v13_0_0_set_ppt_funcs(smu);
break;
+ case IP_VERSION(13, 0, 6):
+ smu_v13_0_6_set_ppt_funcs(smu);
+ /* Enable pp_od_clk_voltage node */
+ smu->od_enabled = true;
+ break;
case IP_VERSION(13, 0, 7):
smu_v13_0_7_set_ppt_funcs(smu);
break;
@@ -2532,6 +2538,28 @@ unlock:
return ret;
}
+static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
+{
+ int ret = -EINVAL;
+ struct smu_context *smu = handle;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
+ ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
+
+ return ret;
+}
+
+static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
+{
+ int ret = -EINVAL;
+ struct smu_context *smu = handle;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
+ ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
+
+ return ret;
+}
+
static int smu_get_power_profile_mode(void *handle, char *buf)
{
struct smu_context *smu = handle;
@@ -3033,6 +3061,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.emit_clock_levels = smu_emit_ppclk_levels,
.force_performance_level = smu_force_performance_level,
.read_sensor = smu_read_sensor,
+ .get_apu_thermal_limit = smu_get_apu_thermal_limit,
+ .set_apu_thermal_limit = smu_set_apu_thermal_limit,
.get_performance_level = smu_get_performance_level,
.get_current_power_state = smu_get_current_power_state,
.get_fan_speed_rpm = smu_get_fan_speed_rpm,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 2a03d85bf4e2..09469c750a96 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -722,6 +722,18 @@ struct pptable_funcs {
void *data, uint32_t *size);
/**
+	 * @get_apu_thermal_limit: get the APU core thermal limit from the SMU
+	 * @limit: current limit temperature in millidegrees Celsius
+	 */
+	int (*get_apu_thermal_limit)(struct smu_context *smu, uint32_t *limit);
+
+	/**
+	 * @set_apu_thermal_limit: update all controllers with a new limit
+	 * @limit: limit temperature to be set, in millidegrees Celsius
+	 */
+	int (*set_apu_thermal_limit)(struct smu_context *smu, uint32_t limit);
+
+ /**
* @pre_display_config_changed: Prepare GPU for a display configuration
* change.
*
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
index 8361ebd8d876..21e6028a49e6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
@@ -238,7 +238,9 @@ typedef struct {
#define WORKLOAD_PPLIB_VR_BIT 3
#define WORKLOAD_PPLIB_COMPUTE_BIT 4
#define WORKLOAD_PPLIB_CUSTOM_BIT 5
-#define WORKLOAD_PPLIB_COUNT 6
+#define WORKLOAD_PPLIB_CAPPED_BIT 6
+#define WORKLOAD_PPLIB_UNCAPPED_BIT 7
+#define WORKLOAD_PPLIB_COUNT 8
#define TABLE_BIOS_IF 0 // Called by BIOS
#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS
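
The two new workload bits line up with the CAPPED/UNCAPPED power profiles
added to PP_SMC_POWER_PROFILE earlier in this series. A sketch of how such
bits are typically turned into the bitmask handed to the PMFW when a
profile is activated; the mapping shown is illustrative, the real table
lives in the vangogh ppt code:

static uint32_t profile_to_workload_mask(enum PP_SMC_POWER_PROFILE profile)
{
	switch (profile) {
	case PP_SMC_POWER_PROFILE_CAPPED:
		return 1 << WORKLOAD_PPLIB_CAPPED_BIT;
	case PP_SMC_POWER_PROFILE_UNCAPPED:
		return 1 << WORKLOAD_PPLIB_UNCAPPED_BIT;
	default:
		return 1 << WORKLOAD_PPLIB_CUSTOM_BIT;
	}
}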
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index f77401709d83..2162ecd1057d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 7
+#define PMFW_DRIVER_IF_VERSION 8
typedef struct {
int32_t value;
@@ -198,7 +198,7 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
uint16_t CurTemp; //[centi-Celsius]
- uint16_t spare2;
+ uint16_t FilterAlphaValue;
uint16_t AverageGfxclkFrequency;
uint16_t AverageFclkFrequency;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
new file mode 100644
index 000000000000..be596777cd2c
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_6_DRIVER_IF_H
+#define SMU_13_0_6_DRIVER_IF_H
+
+// *** IMPORTANT ***
+// PMFW TEAM: Always increment the interface version if
+// anything is changed in this file
+#define SMU13_0_6_DRIVER_IF_VERSION 0x08042022
+
+//I2C Interface
+#define NUM_I2C_CONTROLLERS 8
+#define I2C_CONTROLLER_ENABLED 1
+#define I2C_CONTROLLER_DISABLED 0
+
+#define MAX_SW_I2C_COMMANDS 24
+
+typedef enum {
+ I2C_CONTROLLER_PORT_0, //CKSVII2C0
+ I2C_CONTROLLER_PORT_1, //CKSVII2C1
+ I2C_CONTROLLER_PORT_COUNT,
+} I2cControllerPort_e;
+
+typedef enum {
+ UNSUPPORTED_1, //50 Kbits/s not supported anymore!
+ I2C_SPEED_STANDARD_100K, //100 Kbits/s
+ I2C_SPEED_FAST_400K, //400 Kbits/s
+ I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode)
+ UNSUPPORTED_2, //1 Mbits/s (in high speed mode) not supported anymore!
+ UNSUPPORTED_3, //2.3 Mbits/s not supported anymore!
+ I2C_SPEED_COUNT,
+} I2cSpeed_e;
+
+typedef enum {
+ I2C_CMD_READ,
+ I2C_CMD_WRITE,
+ I2C_CMD_COUNT,
+} I2cCmdType_e;
+
+#define CMDCONFIG_STOP_BIT 0
+#define CMDCONFIG_RESTART_BIT 1
+#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write
+
+#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT)
+#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT)
+#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT)
+
+typedef struct {
+ uint8_t ReadWriteData; //Return data for read. Data to send for write
+	uint8_t CmdConfig; //Indicates whether the command carries a stop or restart, and whether it is a read or a write
+} SwI2cCmd_t; //SW I2C Command Table
+
+typedef struct {
+ uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1)
+ uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select
+ uint8_t SlaveAddress; //Slave address of device
+ uint8_t NumCmds; //Number of commands
+ SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS];
+} SwI2cRequest_t; // SW I2C Request Table
+
+typedef struct {
+ SwI2cRequest_t SwI2cRequest;
+ uint32_t Spare[8];
+ uint32_t MmHubPadding[8]; // SMU internal use
+} SwI2cRequestExternal_t;
+
+typedef enum {
+ PPCLK_VCLK,
+ PPCLK_DCLK,
+ PPCLK_SOCCLK,
+ PPCLK_UCLK,
+ PPCLK_FCLK,
+ PPCLK_LCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef enum {
+ GPIO_INT_POLARITY_ACTIVE_LOW,
+ GPIO_INT_POLARITY_ACTIVE_HIGH,
+} GpioIntPolarity_e;
+
+//TODO confirm if this is used in SMU_13_0_6 PPSMC_MSG_SetUclkDpmMode
+typedef enum {
+ UCLK_DPM_MODE_BANDWIDTH,
+ UCLK_DPM_MODE_LATENCY,
+} UCLK_DPM_MODE_e;
+
+typedef struct {
+ //0-26 SOC, 27-29 SOCIO
+ uint16_t avgPsmCount[30];
+ uint16_t minPsmCount[30];
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
+} AvfsDebugTableAid_t;
+
+typedef struct {
+ //0-27 GFX, 28-29 SOC
+ uint16_t avgPsmCount[30];
+ uint16_t minPsmCount[30];
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
+} AvfsDebugTableXcd_t;
+
+// These defines are used with the following messages:
+// SMC_MSG_TransferTableDram2Smu
+// SMC_MSG_TransferTableSmu2Dram
+// #define TABLE_PPTABLE 0
+// #define TABLE_AVFS_PSM_DEBUG 1
+// #define TABLE_AVFS_FUSE_OVERRIDE 2
+// #define TABLE_PMSTATUSLOG 3
+// #define TABLE_SMU_METRICS 4
+// #define TABLE_DRIVER_SMU_CONFIG 5
+// #define TABLE_I2C_COMMANDS 6
+// #define TABLE_COUNT 7
+
+// // Table transfer status
+// #define TABLE_TRANSFER_OK 0x0
+// #define TABLE_TRANSFER_FAILED 0xFF
+// #define TABLE_TRANSFER_PENDING 0xAB
+
+#endif
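
The SW I2C tables above describe a request protocol rather than plain data,
so an example helps. A sketch of filling a SwI2cRequest_t for a register
read — write the offset, restart, then clock in the data; the 7-bit
slave-address shift and the register-offset convention are assumptions for
illustration:

static void build_i2c_read(SwI2cRequest_t *req, uint8_t slave_7bit,
			   uint8_t reg, uint8_t num_bytes)
{
	int i;

	/* num_bytes + 1 commands total; must stay within MAX_SW_I2C_COMMANDS. */
	memset(req, 0, sizeof(*req));
	req->I2CcontrollerPort = I2C_CONTROLLER_PORT_0;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = slave_7bit << 1;
	req->NumCmds = num_bytes + 1;

	/* First command writes the register offset (READWRITE bit = write). */
	req->SwI2cCmds[0].ReadWriteData = reg;
	req->SwI2cCmds[0].CmdConfig = CMDCONFIG_READWRITE_MASK;

	/* Restart before the first read, stop after the last one. */
	for (i = 1; i <= num_bytes; i++) {
		req->SwI2cCmds[i].CmdConfig = (i == 1) ? CMDCONFIG_RESTART_MASK : 0;
		if (i == num_bytes)
			req->SwI2cCmds[i].CmdConfig |= CMDCONFIG_STOP_MASK;
	}
}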
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
new file mode 100644
index 000000000000..bdccbb4a6276
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_6_PMFW_H
+#define SMU_13_0_6_PMFW_H
+
+#define NUM_VCLK_DPM_LEVELS 4
+#define NUM_DCLK_DPM_LEVELS 4
+#define NUM_SOCCLK_DPM_LEVELS 4
+#define NUM_LCLK_DPM_LEVELS 4
+#define NUM_UCLK_DPM_LEVELS 4
+#define NUM_FCLK_DPM_LEVELS 4
+#define NUM_XGMI_DPM_LEVELS 2
+#define NUM_CXL_BITRATES 4
+#define NUM_PCIE_BITRATES 4
+#define NUM_XGMI_BITRATES 4
+#define NUM_XGMI_WIDTHS 3
+
+typedef enum {
+/*0*/ FEATURE_DATA_CALCULATION = 0,
+/*1*/ FEATURE_DPM_CCLK = 1,
+/*2*/ FEATURE_DPM_FCLK = 2,
+/*3*/ FEATURE_DPM_GFXCLK = 3,
+/*4*/ FEATURE_DPM_LCLK = 4,
+/*5*/ FEATURE_DPM_SOCCLK = 5,
+/*6*/ FEATURE_DPM_UCLK = 6,
+/*7*/ FEATURE_DPM_VCN = 7,
+/*8*/ FEATURE_DPM_XGMI = 8,
+/*9*/ FEATURE_DS_FCLK = 9,
+/*10*/ FEATURE_DS_GFXCLK = 10,
+/*11*/ FEATURE_DS_LCLK = 11,
+/*12*/ FEATURE_DS_MP0CLK = 12,
+/*13*/ FEATURE_DS_MP1CLK = 13,
+/*14*/ FEATURE_DS_MPIOCLK = 14,
+/*15*/ FEATURE_DS_SOCCLK = 15,
+/*16*/ FEATURE_DS_VCN = 16,
+/*17*/ FEATURE_APCC_DFLL = 17,
+/*18*/ FEATURE_APCC_PLUS = 18,
+/*19*/ FEATURE_DF_CSTATE = 19,
+/*20*/ FEATURE_CC6 = 20,
+/*21*/ FEATURE_PC6 = 21,
+/*22*/ FEATURE_CPPC = 22,
+/*23*/ FEATURE_PPT = 23,
+/*24*/ FEATURE_TDC = 24,
+/*25*/ FEATURE_THERMAL = 25,
+/*26*/ FEATURE_SOC_PCC = 26,
+/*27*/ FEATURE_CCD_PCC = 27,
+/*28*/ FEATURE_CCD_EDC = 28,
+/*29*/ FEATURE_PROCHOT = 29,
+/*30*/ FEATURE_DVO_CCLK = 30,
+/*31*/ FEATURE_FDD_AID_HBM = 31,
+/*32*/ FEATURE_FDD_AID_SOC = 32,
+/*33*/ FEATURE_FDD_XCD_EDC = 33,
+/*34*/ FEATURE_FDD_XCD_XVMIN = 34,
+/*35*/ FEATURE_FW_CTF = 35,
+/*36*/ FEATURE_GFXOFF = 36,
+/*37*/ FEATURE_SMU_CG = 37,
+/*38*/ FEATURE_PSI7 = 38,
+/*39*/ FEATURE_CSTATE_BOOST = 39,
+/*40*/ FEATURE_XGMI_PER_LINK_PWR_DOWN = 40,
+/*41*/ FEATURE_CXL_QOS = 41,
+/*42*/ FEATURE_SOC_DC_RTC = 42,
+/*43*/ FEATURE_GFX_DC_RTC = 43,
+
+/*44*/ NUM_FEATURES = 44
+} FEATURE_LIST_e;
+
+//enum for MPIO PCIe gen speed msgs
+typedef enum {
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN1,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN2,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN3,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN4_ESM,
+ PCIE_LINK_SPEED_INDEX_TABLE_GEN5,
+ PCIE_LINK_SPEED_INDEX_TABLE_COUNT
+} PCIE_LINK_SPEED_INDEX_TABLE_e;
+
+typedef enum {
+ VOLTAGE_COLD_0,
+ VOLTAGE_COLD_1,
+ VOLTAGE_COLD_2,
+ VOLTAGE_COLD_3,
+ VOLTAGE_COLD_4,
+ VOLTAGE_COLD_5,
+ VOLTAGE_COLD_6,
+ VOLTAGE_COLD_7,
+ VOLTAGE_MID_0,
+ VOLTAGE_MID_1,
+ VOLTAGE_MID_2,
+ VOLTAGE_MID_3,
+ VOLTAGE_MID_4,
+ VOLTAGE_MID_5,
+ VOLTAGE_MID_6,
+ VOLTAGE_MID_7,
+ VOLTAGE_HOT_0,
+ VOLTAGE_HOT_1,
+ VOLTAGE_HOT_2,
+ VOLTAGE_HOT_3,
+ VOLTAGE_HOT_4,
+ VOLTAGE_HOT_5,
+ VOLTAGE_HOT_6,
+ VOLTAGE_HOT_7,
+ VOLTAGE_GUARDBAND_COUNT
+} GFX_GUARDBAND_e;
+
+#define SMU_METRICS_TABLE_VERSION 0x1
+
+typedef struct {
+ uint32_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t MaxSocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t CcdEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t CclkFrequencyLimit;
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+ uint64_t CclkFrequencyAcc[96];
+
+ //FREQUENCY RANGE
+ uint32_t MaxCclkFrequency;
+ uint32_t MinCclkFrequency;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketC0Residency;
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketC0ResidencyAcc;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+} MetricsTable_t;
+
+#define SMU_VF_METRICS_TABLE_VERSION 0x1
+
+typedef struct {
+ uint32_t AccumulationCounter;
+ uint32_t InstGfxclk_TargFreq;
+ uint64_t AccGfxclk_TargFreq;
+ uint64_t AccGfxRsmuDpm_Busy;
+} VfMetricsTable_t;
+
+#endif
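
The *Acc members read as running accumulators sampled alongside
AccumulationCounter, so a windowed average falls out of two table
snapshots. That reading of the counter is an assumption from the naming;
the authoritative decoding is in the ppt code that consumes this table:

static uint32_t avg_socket_temp(const MetricsTable_t *prev,
				const MetricsTable_t *cur)
{
	uint64_t dacc = cur->MaxSocketTemperatureAcc - prev->MaxSocketTemperatureAcc;
	uint32_t dcnt = cur->AccumulationCounter - prev->AccumulationCounter;

	/* Fall back to the instantaneous reading on an empty window. */
	return dcnt ? (uint32_t)(dacc / dcnt) : cur->MaxSocketTemperature;
}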
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
new file mode 100644
index 000000000000..b838e8db395a
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU_13_0_6_PPSMC_H
+#define SMU_13_0_6_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GfxDriverReset 0x3
+#define PPSMC_MSG_GetDriverIfVersion 0x4
+#define PPSMC_MSG_EnableAllSmuFeatures 0x5
+#define PPSMC_MSG_DisableAllSmuFeatures 0x6
+#define PPSMC_MSG_RequestI2cTransaction 0x7
+#define PPSMC_MSG_GetMetricsVersion 0x8
+#define PPSMC_MSG_GetMetricsTable 0x9
+#define PPSMC_MSG_GetEccInfoTable 0xA
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xC
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrLow 0xE
+#define PPSMC_MSG_SetToolsDramAddrHigh 0xF
+#define PPSMC_MSG_SetToolsDramAddrLow 0x10
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x11
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x12
+#define PPSMC_MSG_SetSoftMinByFreq 0x13
+#define PPSMC_MSG_SetSoftMaxByFreq 0x14
+#define PPSMC_MSG_GetMinDpmFreq 0x15
+#define PPSMC_MSG_GetMaxDpmFreq 0x16
+#define PPSMC_MSG_GetDpmFreqByIndex 0x17
+#define PPSMC_MSG_SetPptLimit 0x18
+#define PPSMC_MSG_GetPptLimit 0x19
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x1A
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x1B
+#define PPSMC_MSG_DramLogSetDramSize 0x1C
+#define PPSMC_MSG_GetDebugData 0x1D
+#define PPSMC_MSG_HeavySBR 0x1E
+#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x1F
+#define PPSMC_MSG_DFCstateControl 0x20
+#define PPSMC_MSG_GetGmiPwrDnHyst 0x21
+#define PPSMC_MSG_SetGmiPwrDnHyst 0x22
+#define PPSMC_MSG_GmiPwrDnControl 0x23
+#define PPSMC_MSG_EnterGfxoff 0x24
+#define PPSMC_MSG_ExitGfxoff 0x25
+#define PPSMC_MSG_EnableDeterminism 0x26
+#define PPSMC_MSG_DisableDeterminism 0x27
+#define PPSMC_MSG_DumpSTBtoDram 0x28
+#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x29
+#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x2A
+#define PPSMC_MSG_STBtoDramLogSetDramSize 0x2B
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x2C
+#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x2D
+#define PPSMC_MSG_GfxDriverResetRecovery 0x2E
+#define PPSMC_MSG_TriggerVFFLR 0x2F
+#define PPSMC_MSG_SetSoftMinGfxClk 0x30
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x31
+#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
+#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
+#define PPSMC_Message_Count 0x34
+
+// PPSMC Reset Types for driver msg argument
+#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
+#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
+#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_MSG;
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 96f6c2db955b..297b70b9388f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -243,7 +243,9 @@
__SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired), \
__SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \
__SMU_DUMMY_MAP(AllowGpo), \
- __SMU_DUMMY_MAP(Mode2Reset),
+ __SMU_DUMMY_MAP(Mode2Reset), \
+ __SMU_DUMMY_MAP(RequestI2cTransaction), \
+ __SMU_DUMMY_MAP(GetMetricsTable),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 1c0ae2cb757b..0ef37837b164 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -29,11 +29,12 @@
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_6 0x0
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -244,6 +245,10 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_13_0_dpm_table *single_dpm_table);
+int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint16_t level,
+ uint32_t *value);
+
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu);
int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 697e98a0a20a..75f18681e984 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2143,16 +2143,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
OverDriveTable_t *user_od_table =
(OverDriveTable_t *)smu->smu_table.user_overdrive_table;
+ OverDriveTable_t user_od_table_bak;
int ret = 0;
- /*
- * For S3/S4/Runpm resume, no need to setup those overdrive tables again as
- * - either they already have the default OD settings got during cold bootup
- * - or they have some user customized OD settings which cannot be overwritten
- */
- if (smu->adev->in_suspend)
- return 0;
-
ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
0, (void *)boot_od_table, false);
if (ret) {
@@ -2163,7 +2156,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
sienna_cichlid_dump_od_table(smu, boot_od_table);
memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
- memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+
+ /*
+ * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
+ * but we have to preserve user defined values in "user_od_table".
+ */
+ if (!smu->adev->in_suspend) {
+ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+ smu->user_dpm_profile.user_od = false;
+ } else if (smu->user_dpm_profile.user_od) {
+ memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t));
+ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+ user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin;
+ user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax;
+ user_od_table->UclkFmin = user_od_table_bak.UclkFmin;
+ user_od_table->UclkFmax = user_od_table_bak.UclkFmax;
+ user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset;
+ }
return 0;
}
@@ -2373,6 +2382,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
return ret;
}
+static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table = table_context->overdrive_table;
+ OverDriveTable_t *user_od_table = table_context->user_overdrive_table;
+ int res;
+
+ res = smu_v11_0_restore_user_od_settings(smu);
+ if (res == 0)
+ memcpy(od_table, user_od_table, sizeof(OverDriveTable_t));
+
+ return res;
+}
+
static int sienna_cichlid_run_btc(struct smu_context *smu)
{
int res;
@@ -4400,7 +4423,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.set_default_od_settings = sienna_cichlid_set_default_od_settings,
.od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
- .restore_user_od_settings = smu_v11_0_restore_user_od_settings,
+ .restore_user_od_settings = sienna_cichlid_restore_user_od_settings,
.run_btc = sienna_cichlid_run_btc,
.set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index cb10c7e31264..4590374251f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -203,6 +203,8 @@ static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT]
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CAPPED, WORKLOAD_PPLIB_CAPPED_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_UNCAPPED, WORKLOAD_PPLIB_UNCAPPED_BIT),
};
static const uint8_t vangogh_throttler_map[] = {
@@ -1046,7 +1048,7 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
if (!buf)
return -EINVAL;
- for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
/*
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
* Not all profile modes are supported on vangogh.
@@ -1070,7 +1072,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input,
int workload_type, ret;
uint32_t profile_mode = input[size];
- if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
return -EINVAL;
}
@@ -1590,6 +1592,21 @@ static int vangogh_read_sensor(struct smu_context *smu,
return ret;
}
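+/* Query or program the APU thermal limit directly through SMU messages. */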
+static int vangogh_get_apu_thermal_limit(struct smu_context *smu, uint32_t *limit)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetThermalLimit,
+ 0, limit);
+}
+
+static int vangogh_set_apu_thermal_limit(struct smu_context *smu, uint32_t limit)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetReducedThermalLimit,
+ limit, NULL);
+}
+
static int vangogh_set_watermarks_table(struct smu_context *smu,
struct pp_smu_wm_range_sets *clock_ranges)
{
@@ -2425,6 +2442,8 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
.is_dpm_running = vangogh_is_dpm_running,
.read_sensor = vangogh_read_sensor,
+ .get_apu_thermal_limit = vangogh_get_apu_thermal_limit,
+ .set_apu_thermal_limit = vangogh_set_apu_thermal_limit,
.get_enabled_mask = smu_cmn_get_enabled_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_watermarks_table = vangogh_set_watermarks_table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 9043f6ef1aee..7f3493b6c53c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -24,7 +24,7 @@
# It provides the smu management services for the driver.
SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \
- smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o
+ smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o
AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a52ed0580fd7..73175c993da9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -294,6 +294,10 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
case IP_VERSION(13, 0, 5):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
break;
+ case IP_VERSION(13, 0, 6):
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_6;
+ adev->pm.fw_version = smu_version;
+ break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
adev->ip_versions[MP1_HWIP][0]);
@@ -1914,10 +1918,9 @@ int smu_v13_0_set_power_source(struct smu_context *smu,
NULL);
}
-static int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint16_t level,
- uint32_t *value)
+int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint16_t level,
+ uint32_t *value)
{
int ret = 0, clk_id = 0;
uint32_t param;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 27448ffe60a4..e9766fe5656e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1587,7 +1587,9 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
i);
- if (workload_type < 0)
+ if (workload_type == -ENOTSUPP)
+ continue;
+ else if (workload_type < 0)
return -EINVAL;
result = smu_cmn_update_table(smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
new file mode 100644
index 000000000000..ea8f3d6fb98b
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -0,0 +1,2069 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
+#include "smu_v13_0_6_pmfw.h"
+#include "smu13_driver_if_v13_0_6.h"
+#include "smu_v13_0_6_ppsmc.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "power_state.h"
+#include "smu_v13_0.h"
+#include "smu_v13_0_6_ppt.h"
+#include "nbio/nbio_7_4_offset.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+#include "thm/thm_11_0_2_offset.h"
+#include "thm/thm_11_0_2_sh_mask.h"
+#include "amdgpu_xgmi.h"
+#include <linux/pci.h>
+#include "amdgpu_ras.h"
+#include "smu_cmn.h"
+#include "mp/mp_13_0_6_offset.h"
+#include "mp/mp_13_0_6_sh_mask.h"
+
+#undef MP1_Public
+#undef smnMP1_FIRMWARE_FLAGS
+
+/* TODO: Check final register offsets */
+#define MP1_Public 0x03b00000
+#define smnMP1_FIRMWARE_FLAGS 0x3010028
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
+#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature) \
+ [smu_feature] = { 1, (smu_13_0_6_feature) }
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE \
+ (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
+ FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) | \
+ FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) | \
+ FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) | \
+ FEATURE_MASK(FEATURE_DPM_VCN))
+
+/* possible frequency drift (1 MHz) */
+#define EPSILON 1
+
+#define smnPCIE_ESM_CTRL 0x111003D0
+
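+/* The trailing flag in each MSG_MAP entry marks messages that remain valid for an SR-IOV VF. */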
+static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1),
+ MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
+ MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
+ MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
+ MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
+ MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 0),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 0),
+ MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0),
+ MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
+ MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
+ MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
+ MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
+ MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
+ MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0),
+ MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0),
+ MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0),
+ MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0),
+ MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0),
+ MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
+ MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
+ MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
+ MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 0),
+ MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 0),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
+};
+
+static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(SOCCLK, PPCLK_SOCCLK),
+ CLK_MAP(FCLK, PPCLK_FCLK),
+ CLK_MAP(UCLK, PPCLK_UCLK),
+ CLK_MAP(MCLK, PPCLK_UCLK),
+ CLK_MAP(DCLK, PPCLK_DCLK),
+ CLK_MAP(VCLK, PPCLK_VCLK),
+ CLK_MAP(LCLK, PPCLK_LCLK),
+};
+
+static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] = {
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_DPM_UCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT, FEATURE_DPM_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT, FEATURE_DPM_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, FEATURE_DPM_XGMI),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_GFXOFF_BIT, FEATURE_GFXOFF),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
+ SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
+};
+
+#define TABLE_PMSTATUSLOG 0
+#define TABLE_SMU_METRICS 1
+#define TABLE_I2C_COMMANDS 2
+#define TABLE_COUNT 3
+
+static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP(PMSTATUSLOG),
+ TAB_MAP(SMU_METRICS),
+ TAB_MAP(I2C_COMMANDS),
+};
+
+#define THROTTLER_PROCHOT_GFX_BIT 0
+#define THROTTLER_PPT_BIT 1
+#define THROTTLER_TEMP_SOC_BIT 2
+#define THROTTLER_TEMP_VR_GFX_BIT 3
+#define THROTTLER_TEMP_HBM_BIT 4
+
+static const uint8_t smu_v13_0_6_throttler_map[] = {
+ [THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT),
+ [THROTTLER_TEMP_SOC_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
+ [THROTTLER_TEMP_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
+ [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
+ [THROTTLER_PROCHOT_GFX_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
+};
+
+struct PPTable_t {
+ uint32_t MaxSocketPowerLimit;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+ bool Init;
+};
+
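+/*
+ * Firmware metrics are unsigned fixed point with 10 fractional bits;
+ * shifting right by 10 keeps the integer part (e.g. a raw 1536 reads
+ * back as 1).
+ */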
+#define SMUQ10_TO_UINT(x) ((x) >> 10)
+
+struct smu_v13_0_6_dpm_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature_num;
+ struct smu_13_0_dpm_table *dpm_table;
+ uint32_t *freq_table;
+};
+
+static int smu_v13_0_6_tables_init(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!(adev->flags & AMD_IS_APU))
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ return -ENOMEM;
+ smu_table->metrics_time = 0;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
+ smu_table->gpu_metrics_table =
+ kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table) {
+ kfree(smu_table->metrics_table);
+ return -ENOMEM;
+ }
+
+ smu_table->driver_pptable =
+ kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
+ if (!smu_table->driver_pptable) {
+ kfree(smu_table->metrics_table);
+ kfree(smu_table->gpu_metrics_table);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ smu_dpm->dpm_context =
+ kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+ smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
+
+ return 0;
+}
+
+static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_v13_0_6_tables_init(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_v13_0_6_allocate_dpm_context(smu);
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
+ uint32_t *feature_mask,
+ uint32_t num)
+{
+ if (num > 2)
+ return -EINVAL;
+
+ /* pptable will handle the features to enable */
+ memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
+
+ return 0;
+}
+
+static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
+ void *metrics_table, bool bypass_cache)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
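+	/*
+	 * Serve the cached table unless the caller bypasses the cache or it
+	 * is stale (older than 1 ms); a fresh copy is fetched from the SMU
+	 * and the HDP cache is invalidated before reading it out of DRAM.
+	 */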
+ if (bypass_cache || !smu_table->metrics_time ||
+ time_after(jiffies,
+ smu_table->metrics_time + msecs_to_jiffies(1))) {
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export SMU metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ smu_table->metrics_time = jiffies;
+ }
+
+ if (metrics_table)
+ memcpy(metrics_table, smu_table->metrics_table, table_size);
+
+ return 0;
+}
+
+static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ int ret;
+ int i;
+
+ /* Store one-time values in driver PPTable */
+ if (!pptable->Init) {
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
+ if (ret)
+ return ret;
+
+ pptable->MaxSocketPowerLimit =
+ SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit);
+ pptable->MaxGfxclkFrequency =
+ SMUQ10_TO_UINT(metrics->MaxGfxclkFrequency);
+ pptable->MinGfxclkFrequency =
+ SMUQ10_TO_UINT(metrics->MinGfxclkFrequency);
+
+ for (i = 0; i < 4; ++i) {
+ pptable->FclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->FclkFrequencyTable[i]);
+ pptable->UclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->UclkFrequencyTable[i]);
+ pptable->SocclkFrequencyTable[i] = SMUQ10_TO_UINT(
+ metrics->SocclkFrequencyTable[i]);
+ pptable->VclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->VclkFrequencyTable[i]);
+ pptable->DclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->DclkFrequencyTable[i]);
+ pptable->LclkFrequencyTable[i] =
+ SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]);
+ }
+
+ pptable->Init = true;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t clock_limit = 0, param;
+ int ret = 0, clk_id = 0;
+
+ if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ if (pptable->Init)
+ clock_limit = pptable->UclkFrequencyTable[0];
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if (pptable->Init)
+ clock_limit = pptable->MinGfxclkFrequency;
+ break;
+ case SMU_SOCCLK:
+ if (pptable->Init)
+			clock_limit = pptable->SocclkFrequencyTable[0];
+ break;
+ case SMU_FCLK:
+ if (pptable->Init)
+ clock_limit = pptable->FclkFrequencyTable[0];
+ break;
+ case SMU_VCLK:
+ if (pptable->Init)
+ clock_limit = pptable->VclkFrequencyTable[0];
+ break;
+ case SMU_DCLK:
+ if (pptable->Init)
+ clock_limit = pptable->DclkFrequencyTable[0];
+ break;
+ default:
+ break;
+ }
+
+ if (min)
+ *min = clock_limit;
+
+ if (max)
+ *max = clock_limit;
+
+ return 0;
+ }
+
+ if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
+ clk_id = smu_cmn_to_asic_specific_index(
+ smu, CMN2ASIC_MAPPING_CLK, clk_type);
+ if (clk_id < 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ param = (clk_id & 0xffff) << 16;
+ }
+
+ if (max) {
+ if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
+ ret = smu_cmn_send_smc_msg(
+ smu, SMU_MSG_GetMaxGfxclkFrequency, max);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_GetMaxDpmFreq, param, max);
+ if (ret)
+ goto failed;
+ }
+
+ if (min) {
+ if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
+ ret = smu_cmn_send_smc_msg(
+ smu, SMU_MSG_GetMinGfxclkFrequency, min);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_GetMinDpmFreq, param, min);
+ }
+
+failed:
+ return ret;
+}
+
+static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *levels)
+{
+ int ret;
+
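+	/*
+	 * Index 0xff asks the firmware for its highest DPM level index; add
+	 * one to turn the zero-based index into a level count.
+	 */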
+ ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels);
+ if (!ret)
+ ++(*levels);
+
+ return ret;
+}
+
+static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_13_0_dpm_table *dpm_table = NULL;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t gfxclkmin, gfxclkmax, levels;
+ int ret = 0, i, j;
+ struct smu_v13_0_6_dpm_map dpm_map[] = {
+ { SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT,
+ &dpm_context->dpm_tables.soc_table,
+ pptable->SocclkFrequencyTable },
+ { SMU_UCLK, SMU_FEATURE_DPM_UCLK_BIT,
+ &dpm_context->dpm_tables.uclk_table,
+ pptable->UclkFrequencyTable },
+ { SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT,
+ &dpm_context->dpm_tables.fclk_table,
+ pptable->FclkFrequencyTable },
+ { SMU_VCLK, SMU_FEATURE_DPM_VCLK_BIT,
+ &dpm_context->dpm_tables.vclk_table,
+ pptable->VclkFrequencyTable },
+ { SMU_DCLK, SMU_FEATURE_DPM_DCLK_BIT,
+ &dpm_context->dpm_tables.dclk_table,
+ pptable->DclkFrequencyTable },
+ };
+
+ smu_v13_0_6_setup_driver_pptable(smu);
+
+ /* gfxclk dpm table setup */
+ dpm_table = &dpm_context->dpm_tables.gfx_table;
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+ /* In the case of gfxclk, only fine-grained dpm is honored.
+ * Get min/max values from FW.
+ */
+ ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
+ &gfxclkmin, &gfxclkmax);
+ if (ret)
+ return ret;
+
+ dpm_table->count = 2;
+ dpm_table->dpm_levels[0].value = gfxclkmin;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->dpm_levels[1].value = gfxclkmax;
+ dpm_table->dpm_levels[1].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[1].value;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
+ dpm_table->dpm_levels[0].enabled = true;
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[0].value;
+ }
+
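+	/*
+	 * dpm_map drives the loop below: each entry pairs a clock with its
+	 * DPM feature bit, the context table to fill and the boot frequency
+	 * table used as a single-level fallback when that DPM is disabled.
+	 */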
+ for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
+ dpm_table = dpm_map[j].dpm_table;
+ levels = 1;
+ if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) {
+ ret = smu_v13_0_6_get_dpm_level_count(
+ smu, dpm_map[j].clk_type, &levels);
+ if (ret)
+ return ret;
+ }
+ dpm_table->count = levels;
+ for (i = 0; i < dpm_table->count; ++i) {
+ dpm_table->dpm_levels[i].value =
+ dpm_map[j].freq_table[i];
+ dpm_table->dpm_levels[i].enabled = true;
+		}
+ dpm_table->min = dpm_table->dpm_levels[0].value;
+ dpm_table->max = dpm_table->dpm_levels[levels - 1].value;
+	}
+
+ return 0;
+}
+
+static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ /* TODO: PPTable is not available.
+ * 1) Find an alternate way to get 'PPTable values' here.
+ * 2) Check if there is SW CTF
+ */
+ table_context->thermal_controller_type = 0;
+
+ return 0;
+}
+
+static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
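+	/*
+	 * MP1 sets the INTERRUPTS_ENABLED flag once its firmware has booted,
+	 * so reading it back is a cheap liveness check for the SMU.
+	 */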
+ mp1_fw_flags =
+ RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_13_0_dpm_table *mem_table =
+ &dpm_context->dpm_tables.uclk_table;
+ struct smu_13_0_dpm_table *soc_table =
+ &dpm_context->dpm_tables.soc_table;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->gfxclk_pstate.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+
+ pstate_table->uclk_pstate.min = mem_table->min;
+ pstate_table->uclk_pstate.peak = mem_table->max;
+ pstate_table->uclk_pstate.curr.min = mem_table->min;
+ pstate_table->uclk_pstate.curr.max = mem_table->max;
+
+ pstate_table->socclk_pstate.min = soc_table->min;
+ pstate_table->socclk_pstate.peak = soc_table->max;
+ pstate_table->socclk_pstate.curr.min = soc_table->min;
+ pstate_table->socclk_pstate.curr.max = soc_table->max;
+
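+	/*
+	 * Use the fixed UMD pstate levels only when every table is deep
+	 * enough; otherwise fall back to the minimum as the standard level.
+	 */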
+ if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
+ soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
+ pstate_table->gfxclk_pstate.standard =
+ gfx_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL].value;
+ pstate_table->uclk_pstate.standard =
+ mem_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL].value;
+ pstate_table->socclk_pstate.standard =
+ soc_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL].value;
+ } else {
+ pstate_table->gfxclk_pstate.standard =
+ pstate_table->gfxclk_pstate.min;
+ pstate_table->uclk_pstate.standard =
+ pstate_table->uclk_pstate.min;
+ pstate_table->socclk_pstate.standard =
+ pstate_table->socclk_pstate.min;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct smu_13_0_dpm_table *dpm_table)
+{
+ int i, count;
+
+ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS :
+ dpm_table->count;
+ clocks->num_levels = count;
+
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1,
+ int32_t frequency2)
+{
+ return (abs(frequency1 - frequency2) <= EPSILON);
+}
+
+static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu,
+ MetricsTable_t *metrics)
+{
+ uint32_t throttler_status = 0;
+
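+	/*
+	 * A non-zero residency accumulator means that throttler has engaged
+	 * since the counters started; fold each one into a single status bit.
+	 */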
+ throttler_status |= metrics->ProchotResidencyAcc > 0 ? 1U << THROTTLER_PROCHOT_GFX_BIT : 0;
+ throttler_status |= metrics->PptResidencyAcc > 0 ? 1U << THROTTLER_PPT_BIT : 0;
+ throttler_status |= metrics->SocketThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_SOC_BIT : 0;
+ throttler_status |= metrics->VrThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_VR_GFX_BIT : 0;
+ throttler_status |= metrics->HbmThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_HBM_BIT : 0;
+
+ return throttler_status;
+}
+
+static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ int ret = 0;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
+ if (ret)
+ return ret;
+
+ /* For clocks with multiple instances, only report the first one */
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ case METRICS_AVERAGE_GFXCLK:
+ *value = 0;
+ break;
+ case METRICS_CURR_SOCCLK:
+ case METRICS_AVERAGE_SOCCLK:
+ *value = SMUQ10_TO_UINT(metrics->SocclkFrequency[0]);
+ break;
+ case METRICS_CURR_UCLK:
+ case METRICS_AVERAGE_UCLK:
+ *value = SMUQ10_TO_UINT(metrics->UclkFrequency);
+ break;
+ case METRICS_CURR_VCLK:
+ *value = SMUQ10_TO_UINT(metrics->VclkFrequency[0]);
+ break;
+ case METRICS_CURR_DCLK:
+ *value = SMUQ10_TO_UINT(metrics->DclkFrequency[0]);
+ break;
+ case METRICS_CURR_FCLK:
+ *value = SMUQ10_TO_UINT(metrics->FclkFrequency);
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = SMUQ10_TO_UINT(metrics->SocketGfxBusy);
+ break;
+ case METRICS_AVERAGE_MEMACTIVITY:
+ *value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ *value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
+ break;
+ case METRICS_TEMPERATURE_MEM:
+ *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
+ break;
+ /* This is the max of all VRs and not just SOC VR.
+ * No need to define another data type for the same.
+ */
+ case METRICS_TEMPERATURE_VRSOC:
+ *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature);
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = smu_v13_0_6_get_throttler_status(smu, metrics);
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ member_type = METRICS_CURR_GFXCLK;
+ break;
+ case SMU_UCLK:
+ member_type = METRICS_CURR_UCLK;
+ break;
+ case SMU_SOCCLK:
+ member_type = METRICS_CURR_SOCCLK;
+ break;
+ case SMU_VCLK:
+ member_type = METRICS_CURR_VCLK;
+ break;
+ case SMU_DCLK:
+ member_type = METRICS_CURR_DCLK;
+ break;
+ case SMU_FCLK:
+ member_type = METRICS_CURR_FCLK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, char *buf)
+{
+ int i, now, size = 0;
+ int ret = 0;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct pp_clock_levels_with_latency clocks;
+ struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_13_0_dpm_context *dpm_context = NULL;
+ uint32_t display_levels;
+ uint32_t freq_values[3] = { 0 };
+ uint32_t min_clk, max_clk;
+
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ if (amdgpu_ras_intr_triggered()) {
+ size += sysfs_emit_at(buf, size, "unavailable\n");
+ return size;
+ }
+
+ dpm_context = smu_dpm->dpm_context;
+
+ switch (type) {
+ case SMU_OD_SCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
+ fallthrough;
+ case SMU_SCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current gfx clk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get gfx clk levels Failed!");
+ return ret;
+ }
+
+ display_levels = clocks.num_levels;
+
+ min_clk = pstate_table->gfxclk_pstate.curr.min;
+ max_clk = pstate_table->gfxclk_pstate.curr.max;
+
+ freq_values[0] = min_clk;
+ freq_values[1] = max_clk;
+
+ /* fine-grained dpm has only 2 levels */
+ if (now > min_clk && now < max_clk) {
+ display_levels = clocks.num_levels + 1;
+ freq_values[2] = max_clk;
+ freq_values[1] = now;
+ }
+
+ /*
+ * For DPM disabled case, there will be only one clock level.
+ * And it's safe to assume that is always the current clock.
+ */
+ if (display_levels == clocks.num_levels) {
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ freq_values[i],
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ freq_values[i], now) ?
+ "*" :
+ ""));
+ } else {
+ for (i = 0; i < display_levels; i++)
+ size += sysfs_emit_at(buf, size,
+ "%d: %uMhz %s\n", i,
+ freq_values[i],
+ i == 1 ? "*" : "");
+ }
+
+ break;
+
+ case SMU_OD_MCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
+ fallthrough;
+ case SMU_MCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current mclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_SOCCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current socclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.soc_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get socclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_FCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current fclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get fclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_VCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current vclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get vclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ case SMU_DCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
+ &now);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get current dclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+ ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Attempt to get dclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sysfs_emit_at(
+ buf, size, "%d: %uMhz %s\n", i,
+ single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ?
+ "*" :
+ (smu_v13_0_6_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz /
+ 1000,
+ now) ?
+ "*" :
+ ""));
+ break;
+
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
+ uint32_t feature_mask, uint32_t level)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ uint32_t freq;
+ int ret = 0;
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK))) {
+ freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu,
+ (max ? SMU_MSG_SetSoftMaxGfxClk :
+ SMU_MSG_SetSoftMinGfxclk),
+ freq & 0xffff, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to set soft %s gfxclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
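+	/*
+	 * For non-gfx clocks the message argument packs the PPCLK id in the
+	 * upper 16 bits and the target frequency in MHz in the lower 16.
+	 */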
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
+ (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK))) {
+ freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level]
+ .value;
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq :
+ SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_UCLK << 16) | (freq & 0xffff), NULL);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to set soft %s memclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
+ (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK))) {
+ freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq :
+ SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff), NULL);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to set soft %s socclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type type, uint32_t mask)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_dpm_table *single_dpm_table = NULL;
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (type) {
+ case SMU_SCLK:
+ single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
+ if (soft_max_level >= single_dpm_table->count) {
+ dev_err(smu->adev->dev,
+ "Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = smu_v13_0_6_upload_dpm_level(
+ smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK),
+ soft_min_level);
+ if (ret) {
+ dev_err(smu->adev->dev,
+ "Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = smu_v13_0_6_upload_dpm_level(
+ smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK),
+ soft_max_level);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case SMU_MCLK:
+ case SMU_SOCCLK:
+ case SMU_FCLK:
+ /*
+ * Should not arrive here since smu_13_0_6 does not
+ * support mclk/socclk/fclk softmin/softmax settings
+ */
+ ret = -EINVAL;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_AVERAGE_GFXACTIVITY, value);
+ break;
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_AVERAGE_MEMACTIVITY, value);
+ break;
+ default:
+ dev_err(smu->adev->dev,
+ "Invalid sensor for retrieving clock activity\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_gpu_power(struct smu_context *smu, uint32_t *value)
+{
+ if (!value)
+ return -EINVAL;
+
+ return smu_v13_0_6_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER,
+ value);
+}
+
+static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ int ret = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_TEMPERATURE_HOTSPOT, value);
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v13_0_6_get_smu_metrics_data(
+ smu, METRICS_TEMPERATURE_MEM, value);
+ break;
+ default:
+ dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor, void *data,
+ uint32_t *size)
+{
+ int ret = 0;
+
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_MEM_LOAD:
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v13_0_6_get_current_activity_percent(smu, sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = smu_v13_0_6_get_gpu_power(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ ret = smu_v13_0_6_thermal_get_temperature(smu, sensor,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(
+ smu, SMU_UCLK, (uint32_t *)data);
+		/* the output clock frequency is in units of 10 kHz */
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_v13_0_6_get_current_clk_freq_by_table(
+ smu, SMU_GFXCLK, (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
+ uint32_t *current_power_limit,
+ uint32_t *default_power_limit,
+ uint32_t *max_power_limit)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ uint32_t power_limit = 0;
+ int ret;
+
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ if (current_power_limit)
+ *current_power_limit = 0;
+ if (default_power_limit)
+ *default_power_limit = 0;
+ if (max_power_limit)
+ *max_power_limit = 0;
+
+ dev_warn(
+ smu->adev->dev,
+ "PPT feature is not enabled, power values can't be fetched.");
+
+ return 0;
+ }
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
+
+ if (ret) {
+ dev_err(smu->adev->dev, "Couldn't get PPT limit");
+ return -EINVAL;
+ }
+
+ if (current_power_limit)
+ *current_power_limit = power_limit;
+ if (default_power_limit)
+ *default_power_limit = power_limit;
+
+	if (max_power_limit)
+		*max_power_limit = pptable->MaxSocketPowerLimit;
+
+ return 0;
+}
+
+static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+}
+
+static int smu_v13_0_6_system_features_control(struct smu_context *smu,
+ bool enable)
+{
+ int ret;
+
+ /* Nothing to be done for APU */
+ if (smu->adev->flags & AMD_IS_APU)
+ return 0;
+
+ ret = smu_v13_0_system_features_control(smu, enable);
+
+ return ret;
+}
+
+static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
+ uint32_t min,
+ uint32_t max)
+{
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ max & 0xffff, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
+ min & 0xffff, NULL);
+
+ return ret;
+}
+
+static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_13_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int ret;
+
+ /* Disable determinism if switching to another mode */
+ if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
+ (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
+ smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
+ return 0;
+
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) &&
+ (gfx_table->max == pstate_table->gfxclk_pstate.curr.max))
+ return 0;
+
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
+ smu, gfx_table->min, gfx_table->max);
+ if (ret)
+ return ret;
+
+ pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ return 0;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ return 0;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t min_clk;
+ uint32_t max_clk;
+ int ret = 0;
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
+ return -EINVAL;
+
+ if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
+ (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
+ return -EINVAL;
+
+ if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ if (min >= max) {
+ dev_err(smu->adev->dev,
+ "Minimum GFX clk should be less than the maximum allowed clock\n");
+ return -EINVAL;
+ }
+
+ if ((min == pstate_table->gfxclk_pstate.curr.min) &&
+ (max == pstate_table->gfxclk_pstate.curr.max))
+ return 0;
+
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max);
+ if (!ret) {
+ pstate_table->gfxclk_pstate.curr.min = min;
+ pstate_table->gfxclk_pstate.curr.max = max;
+ }
+
+ return ret;
+ }
+
+ if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
+ (max > dpm_context->dpm_tables.gfx_table.max)) {
+ dev_warn(
+ adev->dev,
+ "Invalid max frequency %d MHz specified for determinism\n",
+ max);
+ return -EINVAL;
+ }
+
+ /* Restore default min/max clocks and enable determinism */
+ min_clk = dpm_context->dpm_tables.gfx_table.min;
+ max_clk = dpm_context->dpm_tables.gfx_table.max;
+ ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
+ max_clk);
+ if (!ret) {
+ usleep_range(500, 1000);
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_EnableDeterminism, max, NULL);
+ if (ret) {
+ dev_err(adev->dev,
+ "Failed to enable determinism at GFX clock %d MHz\n",
+ max);
+ } else {
+ pstate_table->gfxclk_pstate.curr.min = min_clk;
+ pstate_table->gfxclk_pstate.curr.max = max;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ uint32_t min_clk;
+ uint32_t max_clk;
+ int ret = 0;
+
+ /* Only allowed in manual or determinism mode */
+ if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
+ (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
+ return -EINVAL;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
+ dev_warn(
+ smu->adev->dev,
+ "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
+ input[1],
+ dpm_context->dpm_tables.gfx_table.min);
+ pstate_table->gfxclk_pstate.custom.min =
+ pstate_table->gfxclk_pstate.curr.min;
+ return -EINVAL;
+ }
+
+ pstate_table->gfxclk_pstate.custom.min = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
+ dev_warn(
+ smu->adev->dev,
+ "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
+ input[1],
+ dpm_context->dpm_tables.gfx_table.max);
+ pstate_table->gfxclk_pstate.custom.max =
+ pstate_table->gfxclk_pstate.curr.max;
+ return -EINVAL;
+ }
+
+ pstate_table->gfxclk_pstate.custom.max = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ /* Use the default frequencies for manual and determinism mode */
+ min_clk = dpm_context->dpm_tables.gfx_table.min;
+ max_clk = dpm_context->dpm_tables.gfx_table.max;
+
+ return smu_v13_0_6_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk);
+ }
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev,
+ "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ if (!pstate_table->gfxclk_pstate.custom.min)
+ pstate_table->gfxclk_pstate.custom.min =
+ pstate_table->gfxclk_pstate.curr.min;
+
+ if (!pstate_table->gfxclk_pstate.custom.max)
+ pstate_table->gfxclk_pstate.custom.max =
+ pstate_table->gfxclk_pstate.curr.max;
+
+ min_clk = pstate_table->gfxclk_pstate.custom.min;
+ max_clk = pstate_table->gfxclk_pstate.custom.max;
+
+ return smu_v13_0_6_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk);
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
+ uint64_t *feature_mask)
+{
+ uint32_t smu_version;
+ int ret;
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ ret = smu_cmn_get_enabled_mask(smu, feature_mask);
+
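+	/*
+	 * Firmware older than 0x552F00 cannot report the enabled-feature
+	 * mask; treat the resulting -EIO as an empty mask rather than an
+	 * error.
+	 */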
+ if (ret == -EIO && smu_version < 0x552F00) {
+ *feature_mask = 0;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
+{
+ int ret;
+ uint64_t feature_enabled;
+
+ ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
+
+ if (ret)
+ return false;
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
+ void *table_data)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t table_size;
+ int ret = 0;
+
+ if (!table_data)
+ return -EINVAL;
+
+ table_size = smu_table->tables[SMU_TABLE_I2C_COMMANDS].size;
+
+ memcpy(table->cpu_addr, table_data, table_size);
+ /* Flush hdp cache */
+ amdgpu_asic_flush_hdp(adev, NULL);
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
+ NULL);
+
+ return ret;
+}
+
+static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msg, int num_msgs)
+{
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
+ int i, j, r, c;
+ u16 dir;
+
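+	/*
+	 * The request is staged in the shared driver table and the SMU
+	 * writes its response back into the same buffer, which 'res' aliases.
+	 */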
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->I2CcontrollerPort = smu_i2c->port;
+ req->I2CSpeed = I2C_SPEED_FAST_400K;
+ req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
+ dir = msg[0].flags & I2C_M_RD;
+
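+ /*
+ * Flatten all messages into a single PMFW request: each byte of
+ * each i2c_msg occupies one SwI2cCmds slot.
+ */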
+ for (c = i = 0; i < num_msgs; i++) {
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
+
+ if (!(msg[i].flags & I2C_M_RD)) {
+ /* write */
+ cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
+ cmd->ReadWriteData = msg[i].buf[j];
+ }
+
+ if ((dir ^ msg[i].flags) & I2C_M_RD) {
+ /* The transfer direction changed; request a RESTART condition */
+ dir = msg[i].flags & I2C_M_RD;
+ cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
+ }
+
+ req->NumCmds++;
+
+ /*
+ * Insert STOP if we are at the last byte of either last
+ * message for the transaction or the client explicitly
+ * requires a STOP at this particular message.
+ */
+ if ((j == msg[i].len - 1) &&
+ ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
+ cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
+ cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
+ }
+ }
+ }
+ mutex_lock(&adev->pm.mutex);
+ r = smu_v13_0_6_request_i2c_xfer(smu, req);
+ mutex_unlock(&adev->pm.mutex);
+ if (r)
+ goto fail;
+
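+ /* Copy read data back from the response; skip slots consumed by writes */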
+ for (c = i = 0; i < num_msgs; i++) {
+ if (!(msg[i].flags & I2C_M_RD)) {
+ c += msg[i].len;
+ continue;
+ }
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
+
+ msg[i].buf[j] = cmd->ReadWriteData;
+ }
+ }
+ r = num_msgs;
+fail:
+ kfree(req);
+ return r;
+}
+
+static u32 smu_v13_0_6_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm smu_v13_0_6_i2c_algo = {
+ .master_xfer = smu_v13_0_6_i2c_xfer,
+ .functionality = smu_v13_0_6_i2c_func,
+};
+
+static const struct i2c_adapter_quirks smu_v13_0_6_i2c_control_quirks = {
+ .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
+ .max_read_len = MAX_SW_I2C_COMMANDS,
+ .max_write_len = MAX_SW_I2C_COMMANDS,
+ .max_comb_1st_msg_len = 2,
+ .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
+};
+
+static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
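+ /* Expose each SMU-managed I2C port as a separate Linux adapter */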
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &smu_v13_0_6_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ control->quirks = &smu_v13_0_6_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
+
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+
+ return 0;
+Out_err:
+ /* Adapter i failed to register; unwind only the ones already added */
+ for (--i; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ return res;
+}
+
+static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
+}
+
+static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ //SmuMetrics_t *metrics = smu->smu_table.metrics_table;
+ uint32_t upper32 = 0, lower32 = 0;
+ int ret;
+
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
+ goto out;
+
+ //upper32 = metrics->PublicSerialNumUpper32;
+ //lower32 = metrics->PublicSerialNumLower32;
+
+out:
+ adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
+ if (adev->serial[0] == '\0')
+ sprintf(adev->serial, "%016llx", adev->unique_id);
+}
+
+static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
+{
+ /* smu_13_0_6 does not support baco */
+
+ return false;
+}
+
+static int smu_v13_0_6_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl,
+ state, NULL);
+}
+
+static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
+ en ? 0 : 1, NULL);
+}
+
+static const struct throttling_logging_label {
+ uint32_t feature_mask;
+ const char *label;
+} logging_label[] = {
+ { (1U << THROTTLER_TEMP_HBM_BIT), "HBM" },
+ { (1U << THROTTLER_TEMP_SOC_BIT), "SOC" },
+ { (1U << THROTTLER_TEMP_VR_GFX_BIT), "VR limit" },
+};
+static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
+{
+ int ret;
+ int throttler_idx, throttling_events = 0, buf_idx = 0;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t throttler_status;
+ char log_buf[256];
+
+ ret = smu_v13_0_6_get_smu_metrics_data(smu, METRICS_THROTTLER_STATUS,
+ &throttler_status);
+ if (ret)
+ return;
+
+ memset(log_buf, 0, sizeof(log_buf));
+ for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
+ throttler_idx++) {
+ if (throttler_status &
+ logging_label[throttler_idx].feature_mask) {
+ throttling_events++;
+ buf_idx += snprintf(log_buf + buf_idx,
+ sizeof(log_buf) - buf_idx, "%s%s",
+ throttling_events > 1 ? " and " : "",
+ logging_label[throttler_idx].label);
+ if (buf_idx >= sizeof(log_buf)) {
+ dev_err(adev->dev, "buffer overflow!\n");
+ log_buf[sizeof(log_buf) - 1] = '\0';
+ break;
+ }
+ }
+ }
+
+ dev_warn(
+ adev->dev,
+ "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",
+ log_buf);
+ kgd2kfd_smi_event_throttle(
+ smu->adev->kfd.dev,
+ smu_cmn_get_indep_throttler_status(throttler_status,
+ smu_v13_0_6_throttler_map));
+}
+
+static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t esm_ctrl;
+
+ /* TODO: confirm this on real target */
+ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
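+ /* Bits 13:8 appear to encode the ESM link rate, offset by 128 */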
+ if ((esm_ctrl >> 15) & 0x1FFFF)
+ return (((esm_ctrl >> 8) & 0x3F) + 128);
+
+ return smu_v13_0_get_current_pcie_link_speed(smu);
+}
+
+static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
+ MetricsTable_t *metrics;
+ int i, ret = 0;
+
+ metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ if (!metrics)
+ return -ENOMEM;
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+ if (ret) {
+ kfree(metrics);
+ return ret;
+ }
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
+
+ /* TODO: Decide how to fill in the zero-value fields below */
+ gpu_metrics->temperature_edge = 0;
+ gpu_metrics->temperature_hotspot = 0;
+ gpu_metrics->temperature_mem = 0;
+ gpu_metrics->temperature_vrgfx = 0;
+ gpu_metrics->temperature_vrsoc = 0;
+ gpu_metrics->temperature_vrmem = 0;
+
+ gpu_metrics->average_gfx_activity = 0;
+ gpu_metrics->average_umc_activity = 0;
+ gpu_metrics->average_mm_activity = 0;
+
+ gpu_metrics->average_socket_power = 0;
+ gpu_metrics->energy_accumulator = 0;
+
+ gpu_metrics->average_gfxclk_frequency = 0;
+ gpu_metrics->average_socclk_frequency = 0;
+ gpu_metrics->average_uclk_frequency = 0;
+ gpu_metrics->average_vclk0_frequency = 0;
+ gpu_metrics->average_dclk0_frequency = 0;
+
+ gpu_metrics->current_gfxclk = 0;
+ gpu_metrics->current_socclk = 0;
+ gpu_metrics->current_uclk = 0;
+ gpu_metrics->current_vclk0 = 0;
+ gpu_metrics->current_dclk0 = 0;
+
+ gpu_metrics->throttle_status = 0;
+ gpu_metrics->indep_throttle_status = smu_cmn_get_indep_throttler_status(
+ gpu_metrics->throttle_status, smu_v13_0_6_throttler_map);
+
+ gpu_metrics->current_fan_speed = 0;
+
+ gpu_metrics->pcie_link_width = 0;
+ gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ gpu_metrics->gfx_activity_acc = 0;
+ gpu_metrics->mem_activity_acc = 0;
+
+ for (i = 0; i < NUM_HBM_INSTANCES; i++)
+ gpu_metrics->temperature_hbm[i] = 0;
+
+ gpu_metrics->firmware_timestamp = 0;
+
+ *table = (void *)gpu_metrics;
+ kfree(metrics);
+
+ return sizeof(struct gpu_metrics_v1_3);
+}
+
+static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
+{
+ int ret = 0, index;
+ struct amdgpu_device *adev = smu->adev;
+ int timeout = 10;
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GfxDeviceDriverReset);
+
+ mutex_lock(&smu->message_lock);
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
+ SMU_RESET_MODE_2);
+ /* This is similar to FLR, wait till max FLR timeout */
+ msleep(100);
+ dev_dbg(smu->adev->dev, "restore config space...\n");
+ /* Restore the config space saved during init */
+ amdgpu_device_load_pci_state(adev->pdev);
+
+ dev_dbg(smu->adev->dev, "wait for reset ack\n");
+ while (ret == -ETIME && timeout) {
+ ret = smu_cmn_wait_for_response(smu);
+ /* Wait a bit longer for the ACK */
+ if (ret == -ETIME) {
+ --timeout;
+ usleep_range(500, 1000);
+ continue;
+ }
+
+ if (ret != 1) {
+ dev_err(adev->dev,
+ "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+ SMU_RESET_MODE_2, ret);
+ goto out;
+ }
+ }
+
+ if (ret == 1)
+ ret = 0;
+out:
+ mutex_unlock(&smu->message_lock);
+
+ return ret;
+}
+
+static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ras *ras;
+ u32 fatal_err, param;
+ int ret = 0;
+
+ ras = amdgpu_ras_get_context(adev);
+ fatal_err = 0;
+ param = SMU_RESET_MODE_1;
+
+ /* Flag a RAS-triggered fatal error; the PMFW honors this bit */
+ if (ras && atomic_read(&ras->in_recovery))
+ fatal_err = 1;
+
+ param |= (fatal_err << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ param, NULL);
+
+ if (!ret)
+ msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+}
+
+static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
+{
+ /* TODO: Enable this when FW support is added */
+ return false;
+}
+
+static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
+{
+ return true;
+}
+
+static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
+ uint32_t size)
+{
+ int ret = 0;
+
+ /* Message the SMU to update the bad page count on SMUBUS */
+ ret = smu_cmn_send_smc_msg_with_param(
+ smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "[%s] failed to message the SMU to update the HBM bad page count\n",
+ __func__);
+
+ return ret;
+}
+
+static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
+ /* init dpm */
+ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
+ /* dpm/clk tables */
+ .set_default_dpm_table = smu_v13_0_6_set_default_dpm_table,
+ .populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk,
+ .print_clk_levels = smu_v13_0_6_print_clk_levels,
+ .force_clk_levels = smu_v13_0_6_force_clk_levels,
+ .read_sensor = smu_v13_0_6_read_sensor,
+ .set_performance_level = smu_v13_0_6_set_performance_level,
+ .get_power_limit = smu_v13_0_6_get_power_limit,
+ .is_dpm_running = smu_v13_0_6_is_dpm_running,
+ .get_unique_id = smu_v13_0_6_get_unique_id,
+ .init_smc_tables = smu_v13_0_6_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
+ .init_power = smu_v13_0_init_power,
+ .fini_power = smu_v13_0_fini_power,
+ .check_fw_status = smu_v13_0_6_check_fw_status,
+ /* pptable related */
+ .check_fw_version = smu_v13_0_check_fw_version,
+ .set_driver_table_location = smu_v13_0_set_driver_table_location,
+ .set_tool_table_location = smu_v13_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
+ .system_features_control = smu_v13_0_6_system_features_control,
+ .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+ .send_smc_msg = smu_cmn_send_smc_msg,
+ .get_enabled_mask = smu_v13_0_6_get_enabled_mask,
+ .feature_is_enabled = smu_cmn_feature_is_enabled,
+ .set_power_limit = smu_v13_0_6_set_power_limit,
+ .set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
+ /* TODO: Thermal limits unknown, skip these for now
+ .register_irq_handler = smu_v13_0_register_irq_handler,
+ .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
+ .disable_thermal_alert = smu_v13_0_disable_thermal_alert,
+ */
+ .setup_pptable = smu_v13_0_6_setup_pptable,
+ .baco_is_support = smu_v13_0_6_is_baco_supported,
+ .get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
+ .od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
+ .set_df_cstate = smu_v13_0_6_set_df_cstate,
+ .allow_xgmi_power_down = smu_v13_0_6_allow_xgmi_power_down,
+ .log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
+ .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
+ .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
+ .mode1_reset = smu_v13_0_6_mode1_reset,
+ .mode2_reset = smu_v13_0_6_mode2_reset,
+ .wait_for_event = smu_v13_0_wait_for_event,
+ .i2c_init = smu_v13_0_6_i2c_control_init,
+ .i2c_fini = smu_v13_0_6_i2c_control_fini,
+ .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
+};
+
+void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
+ smu->message_map = smu_v13_0_6_message_map;
+ smu->clock_map = smu_v13_0_6_clk_map;
+ smu->feature_map = smu_v13_0_6_feature_mask_map;
+ smu->table_map = smu_v13_0_6_table_map;
+ smu_v13_0_set_smu_mailbox_registers(smu);
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
new file mode 100644
index 000000000000..f0fa42a645c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_13_0_6_PPT_H__
+#define __SMU_13_0_6_PPT_H__
+
+#define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2
+#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
+#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+
+extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9e1967d8049e..1b2c82449f20 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -1479,7 +1479,9 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
i);
- if (workload_type < 0) {
+ if (workload_type == -ENOTSUPP)
+ continue;
+ else if (workload_type < 0) {
result = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index d5abafc5a682..3ecb900e6ecd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -478,13 +478,13 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return mapping.map_to;
case CMN2ASIC_MAPPING_WORKLOAD:
- if (index > PP_SMC_POWER_PROFILE_WINDOW3D ||
+ if (index >= PP_SMC_POWER_PROFILE_COUNT ||
!smu->workload_map)
return -EINVAL;
mapping = smu->workload_map[index];
if (!mapping.valid_mapping)
- return -EINVAL;
+ return -ENOTSUPP;
return mapping.map_to;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index e3507dd6f82a..9020bf820bc8 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -26,7 +26,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -100,7 +100,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
{
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
struct platform_device *pdev = to_platform_device(drm->dev);
- struct resource *res;
u32 version;
int ret;
@@ -115,8 +114,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
atomic_set(&hdlcd->dma_end_count, 0);
#endif
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
+ hdlcd->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdlcd->mmio)) {
DRM_ERROR("failed to map control registers area\n");
ret = PTR_ERR(hdlcd->mmio);
@@ -301,7 +299,7 @@ static int hdlcd_drm_bind(struct device *dev)
if (ret)
goto err_register;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
return 0;
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 589c1c66a6dc..c03cfd57b752 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -19,7 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -649,7 +649,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm_to_malidp(drm);
- return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
+ return sysfs_emit(buf, "%08x\n", malidp->core_id);
}
static DEVICE_ATTR_RO(core_id);
@@ -724,8 +724,7 @@ static int malidp_bind(struct device *dev)
hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
malidp->dev = hwdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hwdev->regs = devm_ioremap_resource(dev, res);
+ hwdev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hwdev->regs))
return PTR_ERR(hwdev->regs);
@@ -852,7 +851,7 @@ static int malidp_bind(struct device *dev)
if (ret)
goto register_fail;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
return 0;
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index ecfb060d2557..c8c7f8215155 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -15,7 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -341,7 +341,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_generic_setup(&priv->drm, 32);
+ drm_fbdev_dma_setup(&priv->drm, 32);
return 0;
err_unload:
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 56483860306b..fbb070f63e36 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -9,7 +9,7 @@
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 i = 0, j = 0;
/*
@@ -125,7 +125,7 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
u8 bDPTX = 0;
u8 bDPExecute = 1;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
// S3 come back, need more time to wait BMC ready.
if (bPower)
WaitCount = 300;
@@ -172,7 +172,7 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
// Read and Turn off DP PHY sleep
u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, AST_DP_VIDEO_ENABLE);
@@ -188,7 +188,7 @@ void ast_dp_power_on_off(struct drm_device *dev, bool on)
void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
// Video On/Off
@@ -208,7 +208,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_device *ast = to_ast_device(crtc->dev);
u32 ulRefreshRateIndex;
u8 ModeIdx;
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 4f75a9efb610..1bc35a992369 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -10,7 +10,7 @@ MODULE_FIRMWARE("ast_dp501_fw.bin");
static void ast_release_firmware(void *data)
{
- struct ast_private *ast = data;
+ struct ast_device *ast = data;
release_firmware(ast->dp501_fw);
ast->dp501_fw = NULL;
@@ -18,7 +18,7 @@ static void ast_release_firmware(void *data)
static int ast_load_dp501_microcode(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
int ret;
ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
@@ -28,7 +28,7 @@ static int ast_load_dp501_microcode(struct drm_device *dev)
return devm_add_action_or_reset(dev->dev, ast_release_firmware, ast);
}
-static void send_ack(struct ast_private *ast)
+static void send_ack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
@@ -36,7 +36,7 @@ static void send_ack(struct ast_private *ast)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
-static void send_nack(struct ast_private *ast)
+static void send_nack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
@@ -44,7 +44,7 @@ static void send_nack(struct ast_private *ast)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
-static bool wait_ack(struct ast_private *ast)
+static bool wait_ack(struct ast_device *ast)
{
u8 waitack;
u32 retry = 0;
@@ -60,7 +60,7 @@ static bool wait_ack(struct ast_private *ast)
return false;
}
-static bool wait_nack(struct ast_private *ast)
+static bool wait_nack(struct ast_device *ast)
{
u8 waitack;
u32 retry = 0;
@@ -76,18 +76,18 @@ static bool wait_nack(struct ast_private *ast)
return false;
}
-static void set_cmd_trigger(struct ast_private *ast)
+static void set_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
}
-static void clear_cmd_trigger(struct ast_private *ast)
+static void clear_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
}
#if 0
-static bool wait_fw_ready(struct ast_private *ast)
+static bool wait_fw_ready(struct ast_device *ast)
{
u8 waitready;
u32 retry = 0;
@@ -106,7 +106,7 @@ static bool wait_fw_ready(struct ast_private *ast)
static bool ast_write_cmd(struct drm_device *dev, u8 data)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
int retry = 0;
if (wait_nack(ast)) {
send_nack(ast);
@@ -128,7 +128,7 @@ static bool ast_write_cmd(struct drm_device *dev, u8 data)
static bool ast_write_data(struct drm_device *dev, u8 data)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
if (wait_nack(ast)) {
send_nack(ast);
@@ -146,7 +146,7 @@ static bool ast_write_data(struct drm_device *dev, u8 data)
#if 0
static bool ast_read_data(struct drm_device *dev, u8 *data)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 tmp;
*data = 0;
@@ -163,7 +163,7 @@ static bool ast_read_data(struct drm_device *dev, u8 *data)
return true;
}
-static void clear_cmd(struct ast_private *ast)
+static void clear_cmd(struct ast_device *ast)
{
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00);
@@ -178,14 +178,14 @@ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
msleep(10);
}
-static u32 get_fw_base(struct ast_private *ast)
+static u32 get_fw_base(struct ast_device *ast)
{
return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
}
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 i, data;
u32 boot_address;
@@ -204,7 +204,7 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
static bool ast_launch_m68k(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 i, data, len = 0;
u32 boot_address;
u8 *fw_addr = NULL;
@@ -274,7 +274,7 @@ static bool ast_launch_m68k(struct drm_device *dev)
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 i, boot_address, offset, data;
u32 *pEDIDidx;
@@ -334,7 +334,7 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
static bool ast_init_dvo(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 jreg;
u32 data;
ast_write32(ast, 0xf004, 0x1e6e0000);
@@ -407,7 +407,7 @@ static bool ast_init_dvo(struct drm_device *dev)
static void ast_init_analog(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 data;
/*
@@ -434,7 +434,7 @@ static void ast_init_analog(struct drm_device *dev)
void ast_init_3rdtx(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 jreg;
if (ast->chip == AST2300 || ast->chip == AST2400) {
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index d78852c7cf5b..3a7af6d5aa79 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -105,7 +105,7 @@ static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev)
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ast_private *ast;
+ struct ast_device *ast;
struct drm_device *dev;
int ret;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d51b81fea9c8..a501169cddad 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -157,7 +157,7 @@ to_ast_sil164_connector(struct drm_connector *connector)
* Device
*/
-struct ast_private {
+struct ast_device {
struct drm_device base;
struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */
@@ -210,14 +210,14 @@ struct ast_private {
const struct firmware *dp501_fw; /* dp501 fw */
};
-static inline struct ast_private *to_ast_private(struct drm_device *dev)
+static inline struct ast_device *to_ast_device(struct drm_device *dev)
{
- return container_of(dev, struct ast_private, base);
+ return container_of(dev, struct ast_device, base);
}
-struct ast_private *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags);
+struct ast_device *ast_device_create(const struct drm_driver *drv,
+ struct pci_dev *pdev,
+ unsigned long flags);
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
@@ -238,62 +238,44 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
-#define __ast_read(x) \
-static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
-u##x val = 0;\
-val = ioread##x(ast->regs + reg); \
-return val;\
+static inline u32 ast_read32(struct ast_device *ast, u32 reg)
+{
+ return ioread32(ast->regs + reg);
}
-__ast_read(8);
-__ast_read(16);
-__ast_read(32)
-
-#define __ast_io_read(x) \
-static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
-u##x val = 0;\
-val = ioread##x(ast->ioregs + reg); \
-return val;\
+static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val)
+{
+ iowrite32(val, ast->regs + reg);
}
-__ast_io_read(8);
-__ast_io_read(16);
-__ast_io_read(32);
-
-#define __ast_write(x) \
-static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
- iowrite##x(val, ast->regs + reg);\
- }
-
-__ast_write(8);
-__ast_write(16);
-__ast_write(32);
-
-#define __ast_io_write(x) \
-static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
- iowrite##x(val, ast->ioregs + reg);\
- }
+static inline u8 ast_io_read8(struct ast_device *ast, u32 reg)
+{
+ return ioread8(ast->ioregs + reg);
+}
-__ast_io_write(8);
-__ast_io_write(16);
-#undef __ast_io_write
+static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val)
+{
+ iowrite8(val, ast->ioregs + reg);
+}
-static inline void ast_set_index_reg(struct ast_private *ast,
+static inline void ast_set_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t val)
{
- ast_io_write16(ast, base, ((u16)val << 8) | index);
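+ /* The data port is the byte port immediately after the index port */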
+ ast_io_write8(ast, base, index);
+ ++base;
+ ast_io_write8(ast, base, val);
}
-void ast_set_index_reg_mask(struct ast_private *ast,
+void ast_set_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t mask, uint8_t val);
-uint8_t ast_get_index_reg(struct ast_private *ast,
+uint8_t ast_get_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index);
-uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+uint8_t ast_get_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index, uint8_t mask);
-static inline void ast_open_key(struct ast_private *ast)
+static inline void ast_open_key(struct ast_device *ast)
{
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
}
@@ -352,7 +334,7 @@ struct ast_crtc_state {
#define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base)
-int ast_mode_config_init(struct ast_private *ast);
+int ast_mode_config_init(struct ast_device *ast);
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
@@ -476,16 +458,16 @@ int ast_mode_config_init(struct ast_private *ast);
#define ASTDP_1366x768_60 0x1E
#define ASTDP_1152x864_75 0x1F
-int ast_mm_init(struct ast_private *ast);
+int ast_mm_init(struct ast_device *ast);
/* ast post */
void ast_enable_vga(struct drm_device *dev);
void ast_enable_mmio(struct drm_device *dev);
bool ast_is_vga_enabled(struct drm_device *dev);
void ast_post_gpu(struct drm_device *dev);
-u32 ast_mindwm(struct ast_private *ast, u32 r);
-void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(struct ast_private *ast);
+u32 ast_mindwm(struct ast_device *ast, u32 r);
+void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
+void ast_patch_ahb_2500(struct ast_device *ast);
/* ast dp501 */
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c
index 93e91c36d649..d64045c0b849 100644
--- a/drivers/gpu/drm/ast/ast_i2c.c
+++ b/drivers/gpu/drm/ast/ast_i2c.c
@@ -29,7 +29,7 @@
static void ast_i2c_setsda(void *i2c_priv, int data)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
int i;
u8 ujcrb7, jtemp;
@@ -45,7 +45,7 @@ static void ast_i2c_setsda(void *i2c_priv, int data)
static void ast_i2c_setscl(void *i2c_priv, int clock)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
int i;
u8 ujcrb7, jtemp;
@@ -61,7 +61,7 @@ static void ast_i2c_setscl(void *i2c_priv, int clock)
static int ast_i2c_getsda(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
@@ -83,7 +83,7 @@ static int ast_i2c_getsda(void *i2c_priv)
static int ast_i2c_getscl(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f83ce77127cb..794ffd4a29c5 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -35,7 +35,7 @@
#include "ast_drv.h"
-void ast_set_index_reg_mask(struct ast_private *ast,
+void ast_set_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t mask, uint8_t val)
{
@@ -45,7 +45,7 @@ void ast_set_index_reg_mask(struct ast_private *ast,
ast_set_index_reg(ast, base, index, tmp);
}
-uint8_t ast_get_index_reg(struct ast_private *ast,
+uint8_t ast_get_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index)
{
uint8_t ret;
@@ -54,7 +54,7 @@ uint8_t ast_get_index_reg(struct ast_private *ast,
return ret;
}
-uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+uint8_t ast_get_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index, uint8_t mask)
{
uint8_t ret;
@@ -66,7 +66,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
{
struct device_node *np = dev->dev->of_node;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t data, jregd0, jregd1;
@@ -122,7 +122,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t jreg, scu_rev;
@@ -271,7 +271,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
static int ast_get_dram_info(struct drm_device *dev)
{
struct device_node *np = dev->dev->of_node;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
@@ -394,22 +394,22 @@ static int ast_get_dram_info(struct drm_device *dev)
*/
static void ast_device_release(void *data)
{
- struct ast_private *ast = data;
+ struct ast_device *ast = data;
/* enable standard VGA decode */
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
}
-struct ast_private *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags)
+struct ast_device *ast_device_create(const struct drm_driver *drv,
+ struct pci_dev *pdev,
+ unsigned long flags)
{
struct drm_device *dev;
- struct ast_private *ast;
+ struct ast_device *ast;
bool need_post;
int ret = 0;
- ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_private, base);
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
if (IS_ERR(ast))
return ast;
dev = &ast->base;
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 248284a4b3ff..e16af60deef9 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -33,7 +33,7 @@
#include "ast_drv.h"
-static u32 ast_get_vram_size(struct ast_private *ast)
+static u32 ast_get_vram_size(struct ast_device *ast)
{
u8 jreg;
u32 vram_size;
@@ -73,7 +73,7 @@ static u32 ast_get_vram_size(struct ast_private *ast)
return vram_size;
}
-int ast_mm_init(struct ast_private *ast)
+int ast_mm_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 984ec590a7e7..36374828f6c8 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -51,7 +51,7 @@
#define AST_LUT_SIZE 256
-static inline void ast_load_palette_index(struct ast_private *ast,
+static inline void ast_load_palette_index(struct ast_device *ast,
u8 index, u8 red, u8 green,
u8 blue)
{
@@ -65,7 +65,7 @@ static inline void ast_load_palette_index(struct ast_private *ast,
ast_io_read8(ast, AST_IO_SEQ_PORT);
}
-static void ast_crtc_set_gamma_linear(struct ast_private *ast,
+static void ast_crtc_set_gamma_linear(struct ast_device *ast,
const struct drm_format_info *format)
{
int i;
@@ -84,7 +84,7 @@ static void ast_crtc_set_gamma_linear(struct ast_private *ast,
}
}
-static void ast_crtc_set_gamma(struct ast_private *ast,
+static void ast_crtc_set_gamma(struct ast_device *ast,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
@@ -232,7 +232,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
return true;
}
-static void ast_set_vbios_color_reg(struct ast_private *ast,
+static void ast_set_vbios_color_reg(struct ast_device *ast,
const struct drm_format_info *format,
const struct ast_vbios_mode_info *vbios_mode)
{
@@ -263,7 +263,7 @@ static void ast_set_vbios_color_reg(struct ast_private *ast,
}
}
-static void ast_set_vbios_mode_reg(struct ast_private *ast,
+static void ast_set_vbios_mode_reg(struct ast_device *ast,
const struct drm_display_mode *adjusted_mode,
const struct ast_vbios_mode_info *vbios_mode)
{
@@ -287,7 +287,7 @@ static void ast_set_vbios_mode_reg(struct ast_private *ast,
}
}
-static void ast_set_std_reg(struct ast_private *ast,
+static void ast_set_std_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -335,7 +335,7 @@ static void ast_set_std_reg(struct ast_private *ast,
ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
}
-static void ast_set_crtc_reg(struct ast_private *ast,
+static void ast_set_crtc_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -450,7 +450,7 @@ static void ast_set_crtc_reg(struct ast_private *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
}
-static void ast_set_offset_reg(struct ast_private *ast,
+static void ast_set_offset_reg(struct ast_device *ast,
struct drm_framebuffer *fb)
{
u16 offset;
@@ -460,7 +460,7 @@ static void ast_set_offset_reg(struct ast_private *ast,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
}
-static void ast_set_dclk_reg(struct ast_private *ast,
+static void ast_set_dclk_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -478,7 +478,7 @@ static void ast_set_dclk_reg(struct ast_private *ast,
((clk_info->param3 & 0x3) << 4));
}
-static void ast_set_color_reg(struct ast_private *ast,
+static void ast_set_color_reg(struct ast_device *ast,
const struct drm_format_info *format)
{
u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
@@ -507,7 +507,7 @@ static void ast_set_color_reg(struct ast_private *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
}
-static void ast_set_crtthd_reg(struct ast_private *ast)
+static void ast_set_crtthd_reg(struct ast_device *ast)
{
/* Set Threshold */
if (ast->chip == AST2600) {
@@ -529,7 +529,7 @@ static void ast_set_crtthd_reg(struct ast_private *ast)
}
}
-static void ast_set_sync_reg(struct ast_private *ast,
+static void ast_set_sync_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -544,7 +544,7 @@ static void ast_set_sync_reg(struct ast_private *ast,
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
}
-static void ast_set_start_address_crt1(struct ast_private *ast,
+static void ast_set_start_address_crt1(struct ast_device *ast,
unsigned int offset)
{
u32 addr;
@@ -556,7 +556,7 @@ static void ast_set_start_address_crt1(struct ast_private *ast,
}
-static void ast_wait_for_vretrace(struct ast_private *ast)
+static void ast_wait_for_vretrace(struct ast_device *ast)
{
unsigned long timeout = jiffies + HZ;
u8 vgair1;
@@ -645,7 +645,7 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
@@ -672,23 +672,34 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
/*
* Some BMCs stop scanning out the video signal after the driver
- * reprogrammed the offset or scanout address. This stalls display
- * output for several seconds and makes the display unusable.
- * Therefore only update the offset if it changes and reprogram the
- * address after enabling the plane.
+ * reprogrammed the offset. This stalls display output for several
+ * seconds and makes the display unusable. Therefore only update
+ * the offset if it changes.
*/
if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
ast_set_offset_reg(ast, fb);
- if (!old_fb) {
- ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
- ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
- }
+}
+
+static void ast_primary_plane_helper_atomic_enable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct ast_device *ast = to_ast_device(plane->dev);
+ struct ast_plane *ast_plane = to_ast_plane(plane);
+
+ /*
+ * Some BMCs stop scanning out the video signal after the driver
+ * reprogrammed the scanout address. This stalls display
+ * output for several seconds and makes the display unusable.
+ * Therefore only reprogram the address after enabling the plane.
+ */
+ ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
}
static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
- struct ast_private *ast = to_ast_private(plane->dev);
+ struct ast_device *ast = to_ast_device(plane->dev);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
}
@@ -697,6 +708,7 @@ static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_primary_plane_helper_atomic_check,
.atomic_update = ast_primary_plane_helper_atomic_update,
+ .atomic_enable = ast_primary_plane_helper_atomic_enable,
.atomic_disable = ast_primary_plane_helper_atomic_disable,
};
@@ -707,7 +719,7 @@ static const struct drm_plane_funcs ast_primary_plane_funcs = {
DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static int ast_primary_plane_init(struct ast_private *ast)
+static int ast_primary_plane_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
@@ -800,7 +812,7 @@ static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, i
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
}
-static void ast_set_cursor_base(struct ast_private *ast, u64 address)
+static void ast_set_cursor_base(struct ast_device *ast, u64 address)
{
u8 addr0 = (address >> 3) & 0xff;
u8 addr1 = (address >> 11) & 0xff;
@@ -811,7 +823,7 @@ static void ast_set_cursor_base(struct ast_private *ast, u64 address)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
}
-static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y,
+static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
u8 x_offset, u8 y_offset)
{
u8 x0 = (x & 0x00ff);
@@ -827,7 +839,7 @@ static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
}
-static void ast_set_cursor_enabled(struct ast_private *ast, bool enabled)
+static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
{
static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
AST_IO_VGACRCB_HWC_ENABLED);
@@ -876,7 +888,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct ast_private *ast = to_ast_private(plane->dev);
+ struct ast_device *ast = to_ast_device(plane->dev);
struct iosys_map src_map = shadow_plane_state->data[0];
struct drm_rect damage;
const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -931,7 +943,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
- struct ast_private *ast = to_ast_private(plane->dev);
+ struct ast_device *ast = to_ast_device(plane->dev);
ast_set_cursor_enabled(ast, false);
}
@@ -950,7 +962,7 @@ static const struct drm_plane_funcs ast_cursor_plane_funcs = {
DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static int ast_cursor_plane_init(struct ast_private *ast)
+static int ast_cursor_plane_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct ast_plane *ast_cursor_plane = &ast->cursor_plane;
@@ -995,7 +1007,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_device *ast = to_ast_device(crtc->dev);
u8 ch = AST_DPMS_VSYNC_OFF | AST_DPMS_HSYNC_OFF;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
@@ -1052,7 +1064,7 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
static enum drm_mode_status
ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
- struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_device *ast = to_ast_device(crtc->dev);
enum drm_mode_status status;
uint32_t jtemp;
@@ -1177,7 +1189,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_device *dev = crtc->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
@@ -1202,7 +1214,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info =
@@ -1224,7 +1236,7 @@ static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct drm_device *dev = crtc->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -1312,7 +1324,7 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
static int ast_crtc_init(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct drm_crtc *crtc = &ast->crtc;
int ret;
@@ -1338,7 +1350,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
{
struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector);
struct drm_device *dev = connector->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct edid *edid;
int count;
@@ -1411,7 +1423,7 @@ static int ast_vga_connector_init(struct drm_device *dev,
return 0;
}
-static int ast_vga_output_init(struct ast_private *ast)
+static int ast_vga_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1444,7 +1456,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
{
struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector);
struct drm_device *dev = connector->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct edid *edid;
int count;
@@ -1517,7 +1529,7 @@ static int ast_sil164_connector_init(struct drm_device *dev,
return 0;
}
-static int ast_sil164_output_init(struct ast_private *ast)
+static int ast_sil164_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1604,7 +1616,7 @@ static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector
return 0;
}
-static int ast_dp501_output_init(struct ast_private *ast)
+static int ast_dp501_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1691,7 +1703,7 @@ static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector
return 0;
}
-static int ast_astdp_output_init(struct ast_private *ast)
+static int ast_astdp_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1721,7 +1733,7 @@ static int ast_astdp_output_init(struct ast_private *ast)
static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state)
{
- struct ast_private *ast = to_ast_private(state->dev);
+ struct ast_device *ast = to_ast_device(state->dev);
/*
* Concurrent operations could possibly trigger a call to
@@ -1742,7 +1754,7 @@ static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
unsigned long fbsize, fbpages, max_fbpages;
max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
@@ -1763,7 +1775,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-int ast_mode_config_init(struct ast_private *ast)
+int ast_mode_config_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
int ret;
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 82fd3c8adee1..71bb36b865fd 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -39,7 +39,7 @@ static void ast_post_chip_2500(struct drm_device *dev);
void ast_enable_vga(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
@@ -47,7 +47,7 @@ void ast_enable_vga(struct drm_device *dev)
void ast_enable_mmio(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
}
@@ -55,7 +55,7 @@ void ast_enable_mmio(struct drm_device *dev)
bool ast_is_vga_enabled(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 ch;
ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
@@ -70,7 +70,7 @@ static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
static void
ast_set_def_ext_reg(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 i, index, reg;
const u8 *ext_reg_info;
@@ -110,7 +110,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
}
-u32 ast_mindwm(struct ast_private *ast, u32 r)
+u32 ast_mindwm(struct ast_device *ast, u32 r)
{
uint32_t data;
@@ -123,7 +123,7 @@ u32 ast_mindwm(struct ast_private *ast, u32 r)
return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
}
-void ast_moutdwm(struct ast_private *ast, u32 r, u32 v)
+void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
{
uint32_t data;
ast_write32(ast, 0xf004, r & 0xffff0000);
@@ -162,7 +162,7 @@ static const u32 pattern_AST2150[14] = {
0x20F050E0
};
-static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
+static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
{
u32 data, timeout;
@@ -192,7 +192,7 @@ static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
}
#if 0 /* unused in DDX driver - here for completeness */
-static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
+static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen)
{
u32 data, timeout;
@@ -212,7 +212,7 @@ static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
}
#endif
-static int cbrtest_ast2150(struct ast_private *ast)
+static int cbrtest_ast2150(struct ast_device *ast)
{
int i;
@@ -222,7 +222,7 @@ static int cbrtest_ast2150(struct ast_private *ast)
return 1;
}
-static int cbrscan_ast2150(struct ast_private *ast, int busw)
+static int cbrscan_ast2150(struct ast_device *ast, int busw)
{
u32 patcnt, loop;
@@ -239,7 +239,7 @@ static int cbrscan_ast2150(struct ast_private *ast, int busw)
}
-static void cbrdlli_ast2150(struct ast_private *ast, int busw)
+static void cbrdlli_ast2150(struct ast_device *ast, int busw)
{
u32 dll_min[4], dll_max[4], dlli, data, passcnt;
@@ -273,7 +273,7 @@ cbr_start:
static void ast_init_dram_reg(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 j;
u32 data, temp, i;
const struct ast_dramstruct *dram_reg_info;
@@ -366,7 +366,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
void ast_post_gpu(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 reg;
@@ -449,7 +449,7 @@ static const u32 pattern[8] = {
0x7C61D253
};
-static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl)
+static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -469,7 +469,7 @@ static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl)
return true;
}
-static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl)
+static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -490,32 +490,32 @@ static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl)
}
-static bool mmc_test_burst(struct ast_private *ast, u32 datagen)
+static bool mmc_test_burst(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc1);
}
-static u32 mmc_test_burst2(struct ast_private *ast, u32 datagen)
+static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
{
return mmc_test2(ast, datagen, 0x41);
}
-static bool mmc_test_single(struct ast_private *ast, u32 datagen)
+static bool mmc_test_single(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc5);
}
-static u32 mmc_test_single2(struct ast_private *ast, u32 datagen)
+static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
{
return mmc_test2(ast, datagen, 0x05);
}
-static bool mmc_test_single_2500(struct ast_private *ast, u32 datagen)
+static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0x85);
}
-static int cbr_test(struct ast_private *ast)
+static int cbr_test(struct ast_device *ast)
{
u32 data;
int i;
@@ -534,7 +534,7 @@ static int cbr_test(struct ast_private *ast)
return 1;
}
-static int cbr_scan(struct ast_private *ast)
+static int cbr_scan(struct ast_device *ast)
{
u32 data, data2, patcnt, loop;
@@ -555,7 +555,7 @@ static int cbr_scan(struct ast_private *ast)
return data2;
}
-static u32 cbr_test2(struct ast_private *ast)
+static u32 cbr_test2(struct ast_device *ast)
{
u32 data;
@@ -569,7 +569,7 @@ static u32 cbr_test2(struct ast_private *ast)
return ~data & 0xffff;
}
-static u32 cbr_scan2(struct ast_private *ast)
+static u32 cbr_scan2(struct ast_device *ast)
{
u32 data, data2, patcnt, loop;
@@ -590,7 +590,7 @@ static u32 cbr_scan2(struct ast_private *ast)
return data2;
}
-static bool cbr_test3(struct ast_private *ast)
+static bool cbr_test3(struct ast_device *ast)
{
if (!mmc_test_burst(ast, 0))
return false;
@@ -599,7 +599,7 @@ static bool cbr_test3(struct ast_private *ast)
return true;
}
-static bool cbr_scan3(struct ast_private *ast)
+static bool cbr_scan3(struct ast_device *ast)
{
u32 patcnt, loop;
@@ -615,7 +615,7 @@ static bool cbr_scan3(struct ast_private *ast)
return true;
}
-static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
bool status = false;
@@ -714,7 +714,7 @@ FINETUNE_DONE:
return status;
} /* finetuneDQI_L */
-static void finetuneDQSI(struct ast_private *ast)
+static void finetuneDQSI(struct ast_device *ast)
{
u32 dlli, dqsip, dqidly;
u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
@@ -804,7 +804,7 @@ static void finetuneDQSI(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
}
-static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
bool status = false;
@@ -860,7 +860,7 @@ CBR_DONE2:
return status;
} /* CBRDLL2 */
-static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
+static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
@@ -1102,7 +1102,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
}
-static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
+static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 data, data2, retry = 0;
@@ -1225,7 +1225,7 @@ ddr3_init_start:
}
-static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
+static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
@@ -1472,7 +1472,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
}
}
-static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
+static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 data, data2, retry = 0;
@@ -1600,7 +1600,7 @@ ddr2_init_start:
static void ast_post_chip_2300(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct ast2300_dram_param param;
u32 temp;
u8 reg;
@@ -1681,7 +1681,7 @@ static void ast_post_chip_2300(struct drm_device *dev)
} while ((reg & 0x40) == 0);
}
-static bool cbr_test_2500(struct ast_private *ast)
+static bool cbr_test_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
@@ -1692,7 +1692,7 @@ static bool cbr_test_2500(struct ast_private *ast)
return true;
}
-static bool ddr_test_2500(struct ast_private *ast)
+static bool ddr_test_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
@@ -1709,7 +1709,7 @@ static bool ddr_test_2500(struct ast_private *ast)
return true;
}
-static void ddr_init_common_2500(struct ast_private *ast)
+static void ddr_init_common_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
@@ -1732,7 +1732,7 @@ static void ddr_init_common_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
}
-static void ddr_phy_init_2500(struct ast_private *ast)
+static void ddr_phy_init_2500(struct ast_device *ast)
{
u32 data, pass, timecnt;
@@ -1766,7 +1766,7 @@ static void ddr_phy_init_2500(struct ast_private *ast)
* 4Gb : 0x80000000 ~ 0x9FFFFFFF
* 8Gb : 0x80000000 ~ 0xBFFFFFFF
*/
-static void check_dram_size_2500(struct ast_private *ast, u32 tRFC)
+static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
{
u32 reg_04, reg_14;
@@ -1797,7 +1797,7 @@ static void check_dram_size_2500(struct ast_private *ast, u32 tRFC)
ast_moutdwm(ast, 0x1E6E0014, reg_14);
}
-static void enable_cache_2500(struct ast_private *ast)
+static void enable_cache_2500(struct ast_device *ast)
{
u32 reg_04, data;
@@ -1810,7 +1810,7 @@ static void enable_cache_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
}
-static void set_mpll_2500(struct ast_private *ast)
+static void set_mpll_2500(struct ast_device *ast)
{
u32 addr, data, param;
@@ -1837,7 +1837,7 @@ static void set_mpll_2500(struct ast_private *ast)
udelay(100);
}
-static void reset_mmc_2500(struct ast_private *ast)
+static void reset_mmc_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E78505C, 0x00000004);
ast_moutdwm(ast, 0x1E785044, 0x00000001);
@@ -1848,7 +1848,7 @@ static void reset_mmc_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
}
-static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table)
+static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
@@ -1892,7 +1892,7 @@ static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table)
ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}
-static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table)
+static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
u32 data, data2, pass, retrycnt;
u32 ddr_vref, phy_vref;
@@ -2002,7 +2002,7 @@ static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table)
ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}
-static bool ast_dram_init_2500(struct ast_private *ast)
+static bool ast_dram_init_2500(struct ast_device *ast)
{
u32 data;
u32 max_tries = 5;
@@ -2030,7 +2030,7 @@ static bool ast_dram_init_2500(struct ast_private *ast)
return true;
}
-void ast_patch_ahb_2500(struct ast_private *ast)
+void ast_patch_ahb_2500(struct ast_device *ast)
{
u32 data;
@@ -2066,7 +2066,7 @@ void ast_patch_ahb_2500(struct ast_private *ast)
void ast_post_chip_2500(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 temp;
u8 reg;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 4e806b06d35d..29603561d501 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -19,7 +19,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -760,7 +760,7 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_generic_setup(ddev, 24);
+ drm_fbdev_dma_setup(ddev, 24);
return 0;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 8b2226f72b24..f076a09afac0 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -220,6 +220,18 @@ config DRM_PARADE_PS8640
The PS8640 is a high-performance and low-power
MIPI DSI to eDP converter
+config DRM_SAMSUNG_DSIM
+ tristate "Samsung MIPI DSIM bridge driver"
+ depends on COMMON_CLK
+ depends on OF && HAS_IOMEM
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
+ help
+ The Samsung MIPI DSIM bridge controller driver.
+ This MIPI DSIM bridge can be found on Exynos SoCs and
+ NXP's i.MX8M Mini/Nano.
+
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF
@@ -326,7 +338,7 @@ config DRM_TI_DLPC3433
input that produces a DMD output in RGB565, RGB666, RGB888
formats.
- It supports upto 720p resolution with 60 and 120 Hz refresh
+ It supports up to 720p resolution with 60 and 120 Hz refresh
rates.
config DRM_TI_TFP410
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 52f6e8b4a821..2b892b7ed59e 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
+obj-$(CONFIG_DRM_SAMSUNG_DSIM) += samsung-dsim.o
obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index fdfeadcefe80..7e3e56441aed 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -103,22 +103,19 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode)
{
- int lanes;
+ unsigned long max_lane_freq;
struct mipi_dsi_device *dsi = adv->dsi;
+ u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- if (mode->clock > 80000)
- lanes = 4;
- else
- lanes = 3;
-
- /*
- * TODO: add support for dynamic switching of lanes
- * by using the bridge pre_enable() op . Till then filter
- * out the modes which shall need different number of lanes
- * than what was configured in the device tree.
- */
- if (lanes != dsi->lanes)
- return MODE_BAD;
+ /* Check max clock for either 7533 or 7535 */
+ if (mode->clock > (adv->type == ADV7533 ? 80000 : 148500))
+ return MODE_CLOCK_HIGH;
+
+ /* Check max clock for each lane */
+ max_lane_freq = (adv->type == ADV7533 ? 800000 : 891000);
+
+ if (mode->clock * bpp > max_lane_freq * adv->num_dsi_lanes)
+ return MODE_CLOCK_HIGH;
return MODE_OK;
}
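
The rewritten check reduces to a pure bandwidth comparison: the link must carry mode->clock (kHz) times bits-per-pixel across num_dsi_lanes lanes, each capped at max_lane_freq. A quick standalone sanity check of that arithmetic (illustrative only, not driver code; values assume an ADV7535 driving CTA-861 1080p60 over four RGB888 lanes):

#include <stdio.h>

int main(void)
{
	unsigned long mode_clock = 148500;	/* kHz, 1080p60 pixel clock */
	unsigned long max_lane_freq = 891000;	/* kHz per lane on ADV7535 */
	unsigned int bpp = 24, lanes = 4;	/* RGB888 over 4 DSI lanes */

	/* 148500 * 24 == 891000 * 4, i.e. exactly at the limit */
	if (mode_clock * bpp > max_lane_freq * lanes)
		printf("MODE_CLOCK_HIGH\n");
	else
		printf("MODE_OK\n");
	return 0;
}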
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index 5dbfc7226b31..f50d65f54314 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -1278,7 +1278,7 @@ err_disable_pclk:
return ret;
}
-static int cdns_dsi_drm_remove(struct platform_device *pdev)
+static void cdns_dsi_drm_remove(struct platform_device *pdev)
{
struct cdns_dsi *dsi = platform_get_drvdata(pdev);
@@ -1288,8 +1288,6 @@ static int cdns_dsi_drm_remove(struct platform_device *pdev)
dsi->platform_ops->deinit(dsi);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id cdns_dsi_of_match[] = {
@@ -1303,7 +1301,7 @@ MODULE_DEVICE_TABLE(of, cdns_dsi_of_match);
static struct platform_driver cdns_dsi_platform_driver = {
.probe = cdns_dsi_drm_probe,
- .remove = cdns_dsi_drm_remove,
+ .remove_new = cdns_dsi_drm_remove,
.driver = {
.name = "cdns-dsi",
.of_match_table = cdns_dsi_of_match,
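
The .remove to .remove_new conversions in this and the following bridge drivers all follow the same shape: the platform core has long ignored the int returned from .remove, so drivers whose teardown cannot fail switch to the void-returning callback and drop the trailing "return 0". A minimal sketch of the pattern (hypothetical foo_* names):

static void foo_remove(struct platform_device *pdev)
{
	struct foo *foo = platform_get_drvdata(pdev);

	foo_teardown(foo);	/* cleanup only; nothing to propagate */
}

static struct platform_driver foo_driver = {
	.probe      = foo_probe,
	.remove_new = foo_remove,
	/* ... */
};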
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 9a12449ad7b8..56ae511367b1 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -271,12 +271,9 @@ static int display_connector_probe(struct platform_device *pdev)
type == DRM_MODE_CONNECTOR_DisplayPort) {
conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
GPIOD_IN);
- if (IS_ERR(conn->hpd_gpio)) {
- if (PTR_ERR(conn->hpd_gpio) != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Unable to retrieve HPD GPIO\n");
- return PTR_ERR(conn->hpd_gpio);
- }
+ if (IS_ERR(conn->hpd_gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(conn->hpd_gpio),
+ "Unable to retrieve HPD GPIO\n");
conn->hpd_irq = gpiod_to_irq(conn->hpd_gpio);
} else {
@@ -382,7 +379,7 @@ static int display_connector_probe(struct platform_device *pdev)
return 0;
}
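
dev_err_probe() is what lets the open-coded -EPROBE_DEFER test above collapse into one line: it logs at error level for real failures, demotes deferrals to debug (recording the reason so it shows up in debugfs' devices_deferred), and returns the error code. A generic usage sketch with a hypothetical clock resource:

	struct clk *clk = devm_clk_get(&pdev->dev, "pclk");

	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get pclk\n");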
-static int display_connector_remove(struct platform_device *pdev)
+static void display_connector_remove(struct platform_device *pdev)
{
struct display_connector *conn = platform_get_drvdata(pdev);
@@ -396,8 +393,6 @@ static int display_connector_remove(struct platform_device *pdev)
if (!IS_ERR(conn->bridge.ddc))
i2c_put_adapter(conn->bridge.ddc);
-
- return 0;
}
static const struct of_device_id display_connector_match[] = {
@@ -426,7 +421,7 @@ MODULE_DEVICE_TABLE(of, display_connector_match);
static struct platform_driver display_connector_driver = {
.probe = display_connector_probe,
- .remove = display_connector_remove,
+ .remove_new = display_connector_remove,
.driver = {
.name = "display-connector",
.of_match_table = display_connector_match,
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 6bac160b395b..450b352914f4 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -347,13 +347,11 @@ static int fsl_ldb_probe(struct platform_device *pdev)
return 0;
}
-static int fsl_ldb_remove(struct platform_device *pdev)
+static void fsl_ldb_remove(struct platform_device *pdev)
{
struct fsl_ldb *fsl_ldb = platform_get_drvdata(pdev);
drm_bridge_remove(&fsl_ldb->bridge);
-
- return 0;
}
static const struct of_device_id fsl_ldb_match[] = {
@@ -367,7 +365,7 @@ MODULE_DEVICE_TABLE(of, fsl_ldb_match);
static struct platform_driver fsl_ldb_driver = {
.probe = fsl_ldb_probe,
- .remove = fsl_ldb_remove,
+ .remove_new = fsl_ldb_remove,
.driver = {
.name = "fsl-ldb",
.of_match_table = fsl_ldb_match,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c
index 178af8d2d80b..386032a02599 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c
@@ -532,7 +532,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
return ret;
}
-static int imx8qm_ldb_remove(struct platform_device *pdev)
+static void imx8qm_ldb_remove(struct platform_device *pdev)
{
struct imx8qm_ldb *imx8qm_ldb = platform_get_drvdata(pdev);
struct ldb *ldb = &imx8qm_ldb->base;
@@ -540,8 +540,6 @@ static int imx8qm_ldb_remove(struct platform_device *pdev)
ldb_remove_bridge_helper(ldb);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int __maybe_unused imx8qm_ldb_runtime_suspend(struct device *dev)
@@ -573,7 +571,7 @@ MODULE_DEVICE_TABLE(of, imx8qm_ldb_dt_ids);
static struct platform_driver imx8qm_ldb_driver = {
.probe = imx8qm_ldb_probe,
- .remove = imx8qm_ldb_remove,
+ .remove_new = imx8qm_ldb_remove,
.driver = {
.pm = &imx8qm_ldb_pm_ops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c
index 63948d5d20fd..c806576b1e22 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c
@@ -667,7 +667,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
return ret;
}
-static int imx8qxp_ldb_remove(struct platform_device *pdev)
+static void imx8qxp_ldb_remove(struct platform_device *pdev)
{
struct imx8qxp_ldb *imx8qxp_ldb = platform_get_drvdata(pdev);
struct ldb *ldb = &imx8qxp_ldb->base;
@@ -675,8 +675,6 @@ static int imx8qxp_ldb_remove(struct platform_device *pdev)
ldb_remove_bridge_helper(ldb);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int __maybe_unused imx8qxp_ldb_runtime_suspend(struct device *dev)
@@ -708,7 +706,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_ldb_dt_ids);
static struct platform_driver imx8qxp_ldb_driver = {
.probe = imx8qxp_ldb_probe,
- .remove = imx8qxp_ldb_remove,
+ .remove_new = imx8qxp_ldb_remove,
.driver = {
.pm = &imx8qxp_ldb_pm_ops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 503bd8db8afe..d0868a6ac6c9 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -357,7 +357,7 @@ free_child:
return ret;
}
-static int imx8qxp_pc_bridge_remove(struct platform_device *pdev)
+static void imx8qxp_pc_bridge_remove(struct platform_device *pdev)
{
struct imx8qxp_pc *pc = platform_get_drvdata(pdev);
struct imx8qxp_pc_channel *ch;
@@ -374,8 +374,6 @@ static int imx8qxp_pc_bridge_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev)
@@ -435,7 +433,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pc_dt_ids);
static struct platform_driver imx8qxp_pc_bridge_driver = {
.probe = imx8qxp_pc_bridge_probe,
- .remove = imx8qxp_pc_bridge_remove,
+ .remove_new = imx8qxp_pc_bridge_remove,
.driver = {
.pm = &imx8qxp_pc_pm_ops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index 9e5f2b4dc2e5..25dc82a44ef4 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -398,13 +398,11 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
return ret;
}
-static int imx8qxp_pixel_link_bridge_remove(struct platform_device *pdev)
+static void imx8qxp_pixel_link_bridge_remove(struct platform_device *pdev)
{
struct imx8qxp_pixel_link *pl = platform_get_drvdata(pdev);
drm_bridge_remove(&pl->bridge);
-
- return 0;
}
static const struct of_device_id imx8qxp_pixel_link_dt_ids[] = {
@@ -416,7 +414,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pixel_link_dt_ids);
static struct platform_driver imx8qxp_pixel_link_bridge_driver = {
.probe = imx8qxp_pixel_link_bridge_probe,
- .remove = imx8qxp_pixel_link_bridge_remove,
+ .remove_new = imx8qxp_pixel_link_bridge_remove,
.driver = {
.of_match_table = imx8qxp_pixel_link_dt_ids,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index d0fec82f0cf8..4a886cb808ca 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -455,15 +455,13 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
return ret;
}
-static int imx8qxp_pxl2dpi_bridge_remove(struct platform_device *pdev)
+static void imx8qxp_pxl2dpi_bridge_remove(struct platform_device *pdev)
{
struct imx8qxp_pxl2dpi *p2d = platform_get_drvdata(pdev);
drm_bridge_remove(&p2d->bridge);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id imx8qxp_pxl2dpi_dt_ids[] = {
@@ -474,7 +472,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pxl2dpi_dt_ids);
static struct platform_driver imx8qxp_pxl2dpi_bridge_driver = {
.probe = imx8qxp_pxl2dpi_bridge_probe,
- .remove = imx8qxp_pxl2dpi_bridge_remove,
+ .remove_new = imx8qxp_pxl2dpi_bridge_remove,
.driver = {
.of_match_table = imx8qxp_pxl2dpi_dt_ids,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 39e7004de720..67368f23d4aa 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -215,13 +215,11 @@ static int lvds_codec_probe(struct platform_device *pdev)
return 0;
}
-static int lvds_codec_remove(struct platform_device *pdev)
+static void lvds_codec_remove(struct platform_device *pdev)
{
struct lvds_codec *lvds_codec = platform_get_drvdata(pdev);
drm_bridge_remove(&lvds_codec->bridge);
-
- return 0;
}
static const struct of_device_id lvds_codec_match[] = {
@@ -243,7 +241,7 @@ MODULE_DEVICE_TABLE(of, lvds_codec_match);
static struct platform_driver lvds_codec_driver = {
.probe = lvds_codec_probe,
- .remove = lvds_codec_remove,
+ .remove_new = lvds_codec_remove,
.driver = {
.name = "lvds-codec",
.of_match_table = lvds_codec_match,
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 6dc2a4e191d7..4a5f5c4f5dcc 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -1199,7 +1199,7 @@ static int nwl_dsi_probe(struct platform_device *pdev)
return 0;
}
-static int nwl_dsi_remove(struct platform_device *pdev)
+static void nwl_dsi_remove(struct platform_device *pdev)
{
struct nwl_dsi *dsi = platform_get_drvdata(pdev);
@@ -1207,12 +1207,11 @@ static int nwl_dsi_remove(struct platform_device *pdev)
mipi_dsi_host_unregister(&dsi->dsi_host);
drm_bridge_remove(&dsi->bridge);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static struct platform_driver nwl_dsi_driver = {
.probe = nwl_dsi_probe,
- .remove = nwl_dsi_remove,
+ .remove_new = nwl_dsi_remove,
.driver = {
.of_match_table = nwl_dsi_dt_ids,
.name = DRV_NAME,
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index e8aae3cdc73d..d4b112911a99 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -81,6 +81,8 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
return ret;
}
+ drm_panel_bridge_set_orientation(connector, bridge);
+
drm_connector_attach_encoder(&panel_bridge->connector,
bridge->encoder);
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
new file mode 100644
index 000000000000..e0a402a85787
--- /dev/null
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -0,0 +1,1967 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung MIPI DSIM bridge driver.
+ *
+ * Copyright (C) 2021 Amarula Solutions(India)
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ *
+ * Based on exynos_drm_dsi from
+ * Tomasz Figa <t.figa@samsung.com>
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/media-bus-format.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/bridge/samsung-dsim.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* returns true iff both arguments logically differ */
+#define NEQV(a, b) (!(a) ^ !(b))
+
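
NEQV() is a logical (truthiness) XOR, not a bitwise one; the driver uses it later to decide whether the requested LPM mode differs from the currently latched DSIM_STATE_CMD_LPM bit. A tiny standalone demonstration:

#include <stdio.h>

#define NEQV(a, b) (!(a) ^ !(b))

int main(void)
{
	/* compares truth values, so any two non-zero operands are "equal" */
	printf("%d %d %d\n", NEQV(0, 5), NEQV(3, 7), NEQV(0, 0)); /* 1 0 0 */
	return 0;
}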
+/* DSIM_STATUS */
+#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
+#define DSIM_STOP_STATE_CLK BIT(8)
+#define DSIM_TX_READY_HS_CLK BIT(10)
+#define DSIM_PLL_STABLE BIT(31)
+
+/* DSIM_SWRST */
+#define DSIM_FUNCRST BIT(16)
+#define DSIM_SWRST BIT(0)
+
+/* DSIM_TIMEOUT */
+#define DSIM_LPDR_TIMEOUT(x) ((x) << 0)
+#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
+
+/* DSIM_CLKCTRL */
+#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
+#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
+#define DSIM_LANE_ESC_CLK_EN_CLK BIT(19)
+#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
+#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
+#define DSIM_BYTE_CLKEN BIT(24)
+#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
+#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
+#define DSIM_PLL_BYPASS BIT(27)
+#define DSIM_ESC_CLKEN BIT(28)
+#define DSIM_TX_REQUEST_HSCLK BIT(31)
+
+/* DSIM_CONFIG */
+#define DSIM_LANE_EN_CLK BIT(0)
+#define DSIM_LANE_EN(x) (((x) & 0xf) << 1)
+#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5)
+#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8)
+#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12)
+#define DSIM_SUB_VC (((x) & 0x3) << 16)
+#define DSIM_MAIN_VC (((x) & 0x3) << 18)
+#define DSIM_HSA_DISABLE_MODE BIT(20)
+#define DSIM_HBP_DISABLE_MODE BIT(21)
+#define DSIM_HFP_DISABLE_MODE BIT(22)
+/*
+ * The i.MX 8M Mini Applications Processor Reference Manual,
+ * Rev. 3, 11/2020 Page 4091
+ * The i.MX 8M Nano Applications Processor Reference Manual,
+ * Rev. 2, 07/2022 Page 3058
+ * The i.MX 8M Plus Applications Processor Reference Manual,
+ * Rev. 1, 06/2021 Page 5436
+ * all claim this bit is 'HseDisableMode' with the definition
+ * 0 = Disables transfer
+ * 1 = Enables transfer
+ *
+ * This clearly states that HSE is not a disable bit.
+ *
+ * The naming convention follows the manual, and the
+ * driver logic is based on the MIPI_DSI_MODE_VIDEO_HSE flag.
+ */
+#define DSIM_HSE_DISABLE_MODE BIT(23)
+#define DSIM_AUTO_MODE BIT(24)
+#define DSIM_VIDEO_MODE BIT(25)
+#define DSIM_BURST_MODE BIT(26)
+#define DSIM_SYNC_INFORM BIT(27)
+#define DSIM_EOT_DISABLE BIT(28)
+#define DSIM_MFLUSH_VS BIT(29)
+/* This flag is valid only for exynos3250/3472/5260/5430 */
+#define DSIM_CLKLANE_STOP BIT(30)
+
+/* DSIM_ESCMODE */
+#define DSIM_TX_TRIGGER_RST BIT(4)
+#define DSIM_TX_LPDT_LP BIT(6)
+#define DSIM_CMD_LPDT_LP BIT(7)
+#define DSIM_FORCE_BTA BIT(16)
+#define DSIM_FORCE_STOP_STATE BIT(20)
+#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21)
+#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21)
+
+/* DSIM_MDRESOL */
+#define DSIM_MAIN_STAND_BY BIT(31)
+#define DSIM_MAIN_VRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 16)
+#define DSIM_MAIN_HRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 0)
+
+/* DSIM_MVPORCH */
+#define DSIM_CMD_ALLOW(x) ((x) << 28)
+#define DSIM_STABLE_VFP(x) ((x) << 16)
+#define DSIM_MAIN_VBP(x) ((x) << 0)
+#define DSIM_CMD_ALLOW_MASK (0xf << 28)
+#define DSIM_STABLE_VFP_MASK (0x7ff << 16)
+#define DSIM_MAIN_VBP_MASK (0x7ff << 0)
+
+/* DSIM_MHPORCH */
+#define DSIM_MAIN_HFP(x) ((x) << 16)
+#define DSIM_MAIN_HBP(x) ((x) << 0)
+#define DSIM_MAIN_HFP_MASK ((0xffff) << 16)
+#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
+
+/* DSIM_MSYNC */
+#define DSIM_MAIN_VSA(x) ((x) << 22)
+#define DSIM_MAIN_HSA(x) ((x) << 0)
+#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
+#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
+
+/* DSIM_SDRESOL */
+#define DSIM_SUB_STANDY(x) ((x) << 31)
+#define DSIM_SUB_VRESOL(x) ((x) << 16)
+#define DSIM_SUB_HRESOL(x) ((x) << 0)
+#define DSIM_SUB_STANDY_MASK ((0x1) << 31)
+#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16)
+#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0)
+
+/* DSIM_INTSRC */
+#define DSIM_INT_PLL_STABLE BIT(31)
+#define DSIM_INT_SW_RST_RELEASE BIT(30)
+#define DSIM_INT_SFR_FIFO_EMPTY BIT(29)
+#define DSIM_INT_SFR_HDR_FIFO_EMPTY BIT(28)
+#define DSIM_INT_BTA BIT(25)
+#define DSIM_INT_FRAME_DONE BIT(24)
+#define DSIM_INT_RX_TIMEOUT BIT(21)
+#define DSIM_INT_BTA_TIMEOUT BIT(20)
+#define DSIM_INT_RX_DONE BIT(18)
+#define DSIM_INT_RX_TE BIT(17)
+#define DSIM_INT_RX_ACK BIT(16)
+#define DSIM_INT_RX_ECC_ERR BIT(15)
+#define DSIM_INT_RX_CRC_ERR BIT(14)
+
+/* DSIM_FIFOCTRL */
+#define DSIM_RX_DATA_FULL BIT(25)
+#define DSIM_RX_DATA_EMPTY BIT(24)
+#define DSIM_SFR_HEADER_FULL BIT(23)
+#define DSIM_SFR_HEADER_EMPTY BIT(22)
+#define DSIM_SFR_PAYLOAD_FULL BIT(21)
+#define DSIM_SFR_PAYLOAD_EMPTY BIT(20)
+#define DSIM_I80_HEADER_FULL BIT(19)
+#define DSIM_I80_HEADER_EMPTY BIT(18)
+#define DSIM_I80_PAYLOAD_FULL BIT(17)
+#define DSIM_I80_PAYLOAD_EMPTY BIT(16)
+#define DSIM_SD_HEADER_FULL BIT(15)
+#define DSIM_SD_HEADER_EMPTY BIT(14)
+#define DSIM_SD_PAYLOAD_FULL BIT(13)
+#define DSIM_SD_PAYLOAD_EMPTY BIT(12)
+#define DSIM_MD_HEADER_FULL BIT(11)
+#define DSIM_MD_HEADER_EMPTY BIT(10)
+#define DSIM_MD_PAYLOAD_FULL BIT(9)
+#define DSIM_MD_PAYLOAD_EMPTY BIT(8)
+#define DSIM_RX_FIFO BIT(4)
+#define DSIM_SFR_FIFO BIT(3)
+#define DSIM_I80_FIFO BIT(2)
+#define DSIM_SD_FIFO BIT(1)
+#define DSIM_MD_FIFO BIT(0)
+
+/* DSIM_PHYACCHR */
+#define DSIM_AFC_EN BIT(14)
+#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5)
+
+/* DSIM_PLLCTRL */
+#define DSIM_FREQ_BAND(x) ((x) << 24)
+#define DSIM_PLL_EN BIT(23)
+#define DSIM_PLL_P(x, offset) ((x) << (offset))
+#define DSIM_PLL_M(x) ((x) << 4)
+#define DSIM_PLL_S(x) ((x) << 1)
+
+/* DSIM_PHYCTRL */
+#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
+#define DSIM_PHYCTRL_B_DPHYCTL_VREG_LP BIT(30)
+#define DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP BIT(14)
+
+/* DSIM_PHYTIMING */
+#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
+#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0)
+
+/* DSIM_PHYTIMING1 */
+#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24)
+#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16)
+#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8)
+#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0)
+
+/* DSIM_PHYTIMING2 */
+#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16)
+#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8)
+#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0)
+
+#define DSI_MAX_BUS_WIDTH 4
+#define DSI_NUM_VIRTUAL_CHANNELS 4
+#define DSI_TX_FIFO_SIZE 2048
+#define DSI_RX_FIFO_SIZE 256
+#define DSI_XFER_TIMEOUT_MS 100
+#define DSI_RX_FIFO_EMPTY 0x30800002
+
+#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
+
+static const char *const clk_names[5] = {
+ "bus_clk",
+ "sclk_mipi",
+ "phyclk_mipidphy0_bitclkdiv8",
+ "phyclk_mipidphy0_rxclkesc0",
+ "sclk_rgb_vclk_to_dsim0"
+};
+
+enum samsung_dsim_transfer_type {
+ EXYNOS_DSI_TX,
+ EXYNOS_DSI_RX,
+};
+
+enum reg_idx {
+ DSIM_STATUS_REG, /* Status register */
+ DSIM_SWRST_REG, /* Software reset register */
+ DSIM_CLKCTRL_REG, /* Clock control register */
+ DSIM_TIMEOUT_REG, /* Time out register */
+ DSIM_CONFIG_REG, /* Configuration register */
+ DSIM_ESCMODE_REG, /* Escape mode register */
+ DSIM_MDRESOL_REG,
+ DSIM_MVPORCH_REG, /* Main display Vporch register */
+ DSIM_MHPORCH_REG, /* Main display Hporch register */
+ DSIM_MSYNC_REG, /* Main display sync area register */
+ DSIM_INTSRC_REG, /* Interrupt source register */
+ DSIM_INTMSK_REG, /* Interrupt mask register */
+ DSIM_PKTHDR_REG, /* Packet Header FIFO register */
+ DSIM_PAYLOAD_REG, /* Payload FIFO register */
+ DSIM_RXFIFO_REG, /* Read FIFO register */
+ DSIM_FIFOCTRL_REG, /* FIFO status and control register */
+ DSIM_PLLCTRL_REG, /* PLL control register */
+ DSIM_PHYCTRL_REG,
+ DSIM_PHYTIMING_REG,
+ DSIM_PHYTIMING1_REG,
+ DSIM_PHYTIMING2_REG,
+ NUM_REGS
+};
+
+static const unsigned int exynos_reg_ofs[] = {
+ [DSIM_STATUS_REG] = 0x00,
+ [DSIM_SWRST_REG] = 0x04,
+ [DSIM_CLKCTRL_REG] = 0x08,
+ [DSIM_TIMEOUT_REG] = 0x0c,
+ [DSIM_CONFIG_REG] = 0x10,
+ [DSIM_ESCMODE_REG] = 0x14,
+ [DSIM_MDRESOL_REG] = 0x18,
+ [DSIM_MVPORCH_REG] = 0x1c,
+ [DSIM_MHPORCH_REG] = 0x20,
+ [DSIM_MSYNC_REG] = 0x24,
+ [DSIM_INTSRC_REG] = 0x2c,
+ [DSIM_INTMSK_REG] = 0x30,
+ [DSIM_PKTHDR_REG] = 0x34,
+ [DSIM_PAYLOAD_REG] = 0x38,
+ [DSIM_RXFIFO_REG] = 0x3c,
+ [DSIM_FIFOCTRL_REG] = 0x44,
+ [DSIM_PLLCTRL_REG] = 0x4c,
+ [DSIM_PHYCTRL_REG] = 0x5c,
+ [DSIM_PHYTIMING_REG] = 0x64,
+ [DSIM_PHYTIMING1_REG] = 0x68,
+ [DSIM_PHYTIMING2_REG] = 0x6c,
+};
+
+static const unsigned int exynos5433_reg_ofs[] = {
+ [DSIM_STATUS_REG] = 0x04,
+ [DSIM_SWRST_REG] = 0x0C,
+ [DSIM_CLKCTRL_REG] = 0x10,
+ [DSIM_TIMEOUT_REG] = 0x14,
+ [DSIM_CONFIG_REG] = 0x18,
+ [DSIM_ESCMODE_REG] = 0x1C,
+ [DSIM_MDRESOL_REG] = 0x20,
+ [DSIM_MVPORCH_REG] = 0x24,
+ [DSIM_MHPORCH_REG] = 0x28,
+ [DSIM_MSYNC_REG] = 0x2C,
+ [DSIM_INTSRC_REG] = 0x34,
+ [DSIM_INTMSK_REG] = 0x38,
+ [DSIM_PKTHDR_REG] = 0x3C,
+ [DSIM_PAYLOAD_REG] = 0x40,
+ [DSIM_RXFIFO_REG] = 0x44,
+ [DSIM_FIFOCTRL_REG] = 0x4C,
+ [DSIM_PLLCTRL_REG] = 0x94,
+ [DSIM_PHYCTRL_REG] = 0xA4,
+ [DSIM_PHYTIMING_REG] = 0xB4,
+ [DSIM_PHYTIMING1_REG] = 0xB8,
+ [DSIM_PHYTIMING2_REG] = 0xBC,
+};
+
+enum reg_value_idx {
+ RESET_TYPE,
+ PLL_TIMER,
+ STOP_STATE_CNT,
+ PHYCTRL_ULPS_EXIT,
+ PHYCTRL_VREG_LP,
+ PHYCTRL_SLEW_UP,
+ PHYTIMING_LPX,
+ PHYTIMING_HS_EXIT,
+ PHYTIMING_CLK_PREPARE,
+ PHYTIMING_CLK_ZERO,
+ PHYTIMING_CLK_POST,
+ PHYTIMING_CLK_TRAIL,
+ PHYTIMING_HS_PREPARE,
+ PHYTIMING_HS_ZERO,
+ PHYTIMING_HS_TRAIL
+};
+
+static const unsigned int reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x0af),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x27),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
+};
+
+static const unsigned int exynos5422_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d),
+};
+
+static const unsigned int exynos5433_reg_values[] = {
+ [RESET_TYPE] = DSIM_FUNCRST,
+ [PLL_TIMER] = 22200,
+ [STOP_STATE_CNT] = 0xa,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x190),
+ [PHYCTRL_VREG_LP] = DSIM_PHYCTRL_B_DPHYCTL_VREG_LP,
+ [PHYCTRL_SLEW_UP] = DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2d),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0b),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x10),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
+};
+
+static const unsigned int imx8mm_dsim_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+ [PHYCTRL_ULPS_EXIT] = 0,
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x26),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x08),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
+};
+
+static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
+ .reg_ofs = exynos_reg_ofs,
+ .plltmr_reg = 0x50,
+ .has_freqband = 1,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 1000,
+ .wait_for_reset = 1,
+ .num_bits_resol = 11,
+ .pll_p_offset = 13,
+ .reg_values = reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
+ .reg_ofs = exynos_reg_ofs,
+ .plltmr_reg = 0x50,
+ .has_freqband = 1,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 1000,
+ .wait_for_reset = 1,
+ .num_bits_resol = 11,
+ .pll_p_offset = 13,
+ .reg_values = reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
+ .reg_ofs = exynos_reg_ofs,
+ .plltmr_reg = 0x58,
+ .num_clks = 2,
+ .max_freq = 1000,
+ .wait_for_reset = 1,
+ .num_bits_resol = 11,
+ .pll_p_offset = 13,
+ .reg_values = reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
+ .reg_ofs = exynos5433_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .num_clks = 5,
+ .max_freq = 1500,
+ .wait_for_reset = 0,
+ .num_bits_resol = 12,
+ .pll_p_offset = 13,
+ .reg_values = exynos5433_reg_values,
+};
+
+static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
+ .reg_ofs = exynos5433_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 1500,
+ .wait_for_reset = 1,
+ .num_bits_resol = 12,
+ .pll_p_offset = 13,
+ .reg_values = exynos5422_reg_values,
+};
+
+static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = {
+ .reg_ofs = exynos5433_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 2100,
+ .wait_for_reset = 0,
+ .num_bits_resol = 12,
+ /*
+ * Unlike Exynos, PLL_P (PMS_P) uses offset 14 in the i.MX8M
+ * Mini/Nano/Plus downstream driver, drivers/gpu/drm/bridge/sec-dsim.c
+ */
+ .pll_p_offset = 14,
+ .reg_values = imx8mm_dsim_reg_values,
+};
+
+static const struct samsung_dsim_driver_data *
+samsung_dsim_types[DSIM_TYPE_COUNT] = {
+ [DSIM_TYPE_EXYNOS3250] = &exynos3_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS4210] = &exynos4_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS5410] = &exynos5_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS5422] = &exynos5422_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS5433] = &exynos5433_dsi_driver_data,
+ [DSIM_TYPE_IMX8MM] = &imx8mm_dsi_driver_data,
+ [DSIM_TYPE_IMX8MP] = &imx8mm_dsi_driver_data,
+};
+
+static inline struct samsung_dsim *host_to_dsi(struct mipi_dsi_host *h)
+{
+ return container_of(h, struct samsung_dsim, dsi_host);
+}
+
+static inline struct samsung_dsim *bridge_to_dsi(struct drm_bridge *b)
+{
+ return container_of(b, struct samsung_dsim, bridge);
+}
+
+static inline void samsung_dsim_write(struct samsung_dsim *dsi,
+ enum reg_idx idx, u32 val)
+{
+ writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
+}
+
+static inline u32 samsung_dsim_read(struct samsung_dsim *dsi, enum reg_idx idx)
+{
+ return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
+}
+
+static void samsung_dsim_wait_for_reset(struct samsung_dsim *dsi)
+{
+ if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
+ return;
+
+ dev_err(dsi->dev, "timeout waiting for reset\n");
+}
+
+static void samsung_dsim_reset(struct samsung_dsim *dsi)
+{
+ u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE];
+
+ reinit_completion(&dsi->completed);
+ samsung_dsim_write(dsi, DSIM_SWRST_REG, reset_val);
+}
+
+#ifndef MHZ
+#define MHZ (1000 * 1000)
+#endif
+
+static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
+ unsigned long fin,
+ unsigned long fout,
+ u8 *p, u16 *m, u8 *s)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ unsigned long best_freq = 0;
+ u32 min_delta = 0xffffffff;
+ u8 p_min, p_max;
+ u8 _p, best_p;
+ u16 _m, best_m;
+ u8 _s, best_s;
+
+ p_min = DIV_ROUND_UP(fin, (12 * MHZ));
+ p_max = fin / (6 * MHZ);
+
+ for (_p = p_min; _p <= p_max; ++_p) {
+ for (_s = 0; _s <= 5; ++_s) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * (_p << _s);
+ do_div(tmp, fin);
+ _m = tmp;
+ if (_m < 41 || _m > 125)
+ continue;
+
+ tmp = (u64)_m * fin;
+ do_div(tmp, _p);
+ if (tmp < 500 * MHZ ||
+ tmp > driver_data->max_freq * MHZ)
+ continue;
+
+ tmp = (u64)_m * fin;
+ do_div(tmp, _p << _s);
+
+ delta = abs(fout - tmp);
+ if (delta < min_delta) {
+ best_p = _p;
+ best_m = _m;
+ best_s = _s;
+ min_delta = delta;
+ best_freq = tmp;
+ }
+ }
+ }
+
+ if (best_freq) {
+ *p = best_p;
+ *m = best_m;
+ *s = best_s;
+ }
+
+ return best_freq;
+}
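
The search above minimizes |fout - fin * M / (P << S)| with M constrained to 41..125 and the VCO frequency fin * M / P held between 500 MHz and the per-SoC maximum. A standalone sketch of the underlying relation (illustrative values, not driver code):

#include <stdio.h>
#include <stdint.h>

#define MHZ (1000UL * 1000UL)

/* PLL output for one (P, M, S) candidate: fout = fin * M / (P * 2^S) */
static unsigned long pms_fout(unsigned long fin, unsigned int p,
			      unsigned int m, unsigned int s)
{
	return (unsigned long)(((uint64_t)m * fin) / ((uint64_t)p << s));
}

int main(void)
{
	/* fin = 24 MHz, P = 3, M = 125, S = 1: VCO 1 GHz, fout 500 MHz */
	printf("%lu\n", pms_fout(24 * MHZ, 3, 125, 1));
	return 0;
}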
+
+static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
+ unsigned long freq)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ unsigned long fin, fout;
+ int timeout;
+ u8 p, s;
+ u16 m;
+ u32 reg;
+
+ fin = dsi->pll_clk_rate;
+ fout = samsung_dsim_pll_find_pms(dsi, fin, freq, &p, &m, &s);
+ if (!fout) {
+ dev_err(dsi->dev,
+ "failed to find PLL PMS for requested frequency\n");
+ return 0;
+ }
+ dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
+
+ writel(driver_data->reg_values[PLL_TIMER],
+ dsi->reg_base + driver_data->plltmr_reg);
+
+ reg = DSIM_PLL_EN | DSIM_PLL_P(p, driver_data->pll_p_offset) |
+ DSIM_PLL_M(m) | DSIM_PLL_S(s);
+
+ if (driver_data->has_freqband) {
+ static const unsigned long freq_bands[] = {
+ 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
+ 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
+ 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
+ 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ };
+ int band;
+
+ for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
+ if (fout < freq_bands[band])
+ break;
+
+ dev_dbg(dsi->dev, "band %d\n", band);
+
+ reg |= DSIM_FREQ_BAND(band);
+ }
+
+ samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
+
+ timeout = 1000;
+ do {
+ if (timeout-- == 0) {
+ dev_err(dsi->dev, "PLL failed to stabilize\n");
+ return 0;
+ }
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ } while ((reg & DSIM_PLL_STABLE) == 0);
+
+ return fout;
+}
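
On SoCs with has_freqband, the band value programmed above is simply the index of the first table entry the PLL output falls below. A standalone sketch (for example, fout = 500 MHz lands in band 8, since 450 MHz <= fout < 510 MHz):

#include <stdio.h>

#define MHZ (1000UL * 1000UL)
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	static const unsigned long freq_bands[] = {
		100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
		270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
		510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
		770 * MHZ, 870 * MHZ, 950 * MHZ,
	};
	unsigned long fout = 500 * MHZ;
	unsigned int band;

	for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
		if (fout < freq_bands[band])
			break;

	printf("band %u\n", band);	/* 8 */
	return 0;
}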
+
+static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
+{
+ unsigned long hs_clk, byte_clk, esc_clk;
+ unsigned long esc_div;
+ u32 reg;
+
+ hs_clk = samsung_dsim_set_pll(dsi, dsi->burst_clk_rate);
+ if (!hs_clk) {
+ dev_err(dsi->dev, "failed to configure DSI PLL\n");
+ return -EFAULT;
+ }
+
+ byte_clk = hs_clk / 8;
+ esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
+ esc_clk = byte_clk / esc_div;
+
+ if (esc_clk > 20 * MHZ) {
+ ++esc_div;
+ esc_clk = byte_clk / esc_div;
+ }
+
+ dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
+ hs_clk, byte_clk, esc_clk);
+
+ reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
+ reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
+ | DSIM_BYTE_CLK_SRC_MASK);
+ reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
+ | DSIM_ESC_PRESCALER(esc_div)
+ | DSIM_LANE_ESC_CLK_EN_CLK
+ | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
+ | DSIM_BYTE_CLK_SRC(0)
+ | DSIM_TX_REQUEST_HSCLK;
+ samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
+
+ return 0;
+}
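
The escape-clock derivation above is: byte_clk = hs_clk / 8, esc_div = ceil(byte_clk / esc_clk_rate), with the divider bumped once more if the result still exceeds the 20 MHz escape-mode ceiling. A worked standalone example (illustrative values, assuming a requested escape rate of 20 MHz):

#include <stdio.h>

#define MHZ (1000UL * 1000UL)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long hs_clk = 1000 * MHZ;	/* 1 GHz HS bit clock */
	unsigned long byte_clk = hs_clk / 8;	/* 125 MHz */
	unsigned long esc_div = DIV_ROUND_UP(byte_clk, 20 * MHZ);	/* 7 */
	unsigned long esc_clk = byte_clk / esc_div;	/* ~17.86 MHz, under cap */

	printf("esc_div %lu, esc_clk %lu Hz\n", esc_div, esc_clk);
	return 0;
}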
+
+static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ const unsigned int *reg_values = driver_data->reg_values;
+ u32 reg;
+
+ if (driver_data->has_freqband)
+ return;
+
+ /* B D-PHY: D-PHY Master & Slave Analog Block control */
+ reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
+ reg_values[PHYCTRL_SLEW_UP];
+ samsung_dsim_write(dsi, DSIM_PHYCTRL_REG, reg);
+
+ /*
+ * T LPX: Transmitted length of any Low-Power state period
+ * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
+ * burst
+ */
+ reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT];
+ samsung_dsim_write(dsi, DSIM_PHYTIMING_REG, reg);
+
+ /*
+ * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Clock.
+ * T CLK_POST: Time that the transmitter continues to send HS clock
+ * after the last associated Data Lane has transitioned to LP Mode
+ * Interval is defined as the period from the end of T HS-TRAIL to
+ * the beginning of T CLK-TRAIL
+ * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
+ * the last payload clock bit of a HS transmission burst
+ */
+ reg = reg_values[PHYTIMING_CLK_PREPARE] |
+ reg_values[PHYTIMING_CLK_ZERO] |
+ reg_values[PHYTIMING_CLK_POST] |
+ reg_values[PHYTIMING_CLK_TRAIL];
+
+ samsung_dsim_write(dsi, DSIM_PHYTIMING1_REG, reg);
+
+ /*
+ * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Sync sequence.
+ * T HS-TRAIL: Time that the transmitter drives the flipped differential
+ * state after last payload data bit of a HS transmission burst
+ */
+ reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] |
+ reg_values[PHYTIMING_HS_TRAIL];
+ samsung_dsim_write(dsi, DSIM_PHYTIMING2_REG, reg);
+}
+
+static void samsung_dsim_disable_clock(struct samsung_dsim *dsi)
+{
+ u32 reg;
+
+ reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
+ reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
+ | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
+ samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
+
+ reg = samsung_dsim_read(dsi, DSIM_PLLCTRL_REG);
+ reg &= ~DSIM_PLL_EN;
+ samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
+}
+
+static void samsung_dsim_enable_lane(struct samsung_dsim *dsi, u32 lane)
+{
+ u32 reg = samsung_dsim_read(dsi, DSIM_CONFIG_REG);
+
+ reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK |
+ DSIM_LANE_EN(lane));
+ samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg);
+}
+
+static int samsung_dsim_init_link(struct samsung_dsim *dsi)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ int timeout;
+ u32 reg;
+ u32 lanes_mask;
+
+ /* Initialize FIFO pointers */
+ reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
+ reg &= ~0x1f;
+ samsung_dsim_write(dsi, DSIM_FIFOCTRL_REG, reg);
+
+ usleep_range(9000, 11000);
+
+ reg |= 0x1f;
+ samsung_dsim_write(dsi, DSIM_FIFOCTRL_REG, reg);
+ usleep_range(9000, 11000);
+
+ /* DSI configuration */
+ reg = 0;
+
+ /*
+ * The first bit of mode_flags specifies display configuration.
+ * If this bit is set (MIPI_DSI_MODE_VIDEO), the DSI host operates
+ * in video mode; otherwise it operates in command mode.
+ */
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ reg |= DSIM_VIDEO_MODE;
+
+ /*
+ * The user manual notes that the following bits are ignored in
+ * command mode.
+ */
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
+ reg |= DSIM_MFLUSH_VS;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ reg |= DSIM_SYNC_INFORM;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ reg |= DSIM_BURST_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
+ reg |= DSIM_AUTO_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
+ reg |= DSIM_HSE_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
+ reg |= DSIM_HFP_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
+ reg |= DSIM_HBP_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
+ reg |= DSIM_HSA_DISABLE_MODE;
+ }
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ reg |= DSIM_EOT_DISABLE;
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
+ break;
+ default:
+ dev_err(dsi->dev, "invalid pixel format\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Use non-continuous clock mode if the peripheral requests it
+ * and the host controller supports it.
+ *
+ * In non-continuous clock mode, the host controller turns off
+ * the HS clock between high-speed transmissions to reduce
+ * power consumption.
+ */
+ if (driver_data->has_clklane_stop &&
+ dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ reg |= DSIM_CLKLANE_STOP;
+ samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg);
+
+ lanes_mask = BIT(dsi->lanes) - 1;
+ samsung_dsim_enable_lane(dsi, lanes_mask);
+
+ /* Check that the clock and data lanes are in the stop state */
+ timeout = 100;
+ do {
+ if (timeout-- == 0) {
+ dev_err(dsi->dev, "waiting for bus lanes timed out\n");
+ return -EFAULT;
+ }
+
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
+ != DSIM_STOP_STATE_DAT(lanes_mask))
+ continue;
+ } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
+
+ reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+ reg &= ~DSIM_STOP_STATE_CNT_MASK;
+ reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+
+ reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
+ samsung_dsim_write(dsi, DSIM_TIMEOUT_REG, reg);
+
+ return 0;
+}
+
+static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
+{
+ struct drm_display_mode *m = &dsi->mode;
+ unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
+ u32 reg;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ reg = DSIM_CMD_ALLOW(0xf)
+ | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
+ | DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
+ samsung_dsim_write(dsi, DSIM_MVPORCH_REG, reg);
+
+ reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay)
+ | DSIM_MAIN_HBP(m->htotal - m->hsync_end);
+ samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg);
+
+ reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
+ | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start);
+ samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg);
+ }
+ reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) |
+ DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol);
+
+ samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay);
+}
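
The porch and sync fields above come straight from drm_display_mode arithmetic: HFP = hsync_start - hdisplay, HSA = hsync_end - hsync_start, HBP = htotal - hsync_end, and likewise vertically (with STABLE_VFP = vsync_start - vdisplay). A worked standalone example using CTA-861 1080p60 timings:

#include <stdio.h>

int main(void)
{
	int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
	int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

	printf("HFP %d HSA %d HBP %d\n", hsync_start - hdisplay,
	       hsync_end - hsync_start, htotal - hsync_end);	/* 88 44 148 */
	printf("VFP %d VSA %d VBP %d\n", vsync_start - vdisplay,
	       vsync_end - vsync_start, vtotal - vsync_end);	/* 4 5 36 */
	return 0;
}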
+
+static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enable)
+{
+ u32 reg;
+
+ reg = samsung_dsim_read(dsi, DSIM_MDRESOL_REG);
+ if (enable)
+ reg |= DSIM_MAIN_STAND_BY;
+ else
+ reg &= ~DSIM_MAIN_STAND_BY;
+ samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg);
+}
+
+static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
+{
+ int timeout = 2000;
+
+ do {
+ u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
+
+ if (!(reg & DSIM_SFR_HEADER_FULL))
+ return 0;
+
+ if (!cond_resched())
+ usleep_range(950, 1050);
+ } while (--timeout);
+
+ return -ETIMEDOUT;
+}
+
+static void samsung_dsim_set_cmd_lpm(struct samsung_dsim *dsi, bool lpm)
+{
+ u32 v = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+
+ if (lpm)
+ v |= DSIM_CMD_LPDT_LP;
+ else
+ v &= ~DSIM_CMD_LPDT_LP;
+
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, v);
+}
+
+static void samsung_dsim_force_bta(struct samsung_dsim *dsi)
+{
+ u32 v = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+
+ v |= DSIM_FORCE_BTA;
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, v);
+}
+
+static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ struct device *dev = dsi->dev;
+ struct mipi_dsi_packet *pkt = &xfer->packet;
+ const u8 *payload = pkt->payload + xfer->tx_done;
+ u16 length = pkt->payload_length - xfer->tx_done;
+ bool first = !xfer->tx_done;
+ u32 reg;
+
+ dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
+ xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+
+ if (length > DSI_TX_FIFO_SIZE)
+ length = DSI_TX_FIFO_SIZE;
+
+ xfer->tx_done += length;
+
+ /* Send payload */
+ while (length >= 4) {
+ reg = get_unaligned_le32(payload);
+ samsung_dsim_write(dsi, DSIM_PAYLOAD_REG, reg);
+ payload += 4;
+ length -= 4;
+ }
+
+ reg = 0;
+ switch (length) {
+ case 3:
+ reg |= payload[2] << 16;
+ fallthrough;
+ case 2:
+ reg |= payload[1] << 8;
+ fallthrough;
+ case 1:
+ reg |= payload[0];
+ samsung_dsim_write(dsi, DSIM_PAYLOAD_REG, reg);
+ break;
+ }
+
+ /* Send packet header */
+ if (!first)
+ return;
+
+ reg = get_unaligned_le32(pkt->header);
+ if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
+ dev_err(dev, "waiting for header FIFO timed out\n");
+ return;
+ }
+
+ if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
+ dsi->state & DSIM_STATE_CMD_LPM)) {
+ samsung_dsim_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM);
+ dsi->state ^= DSIM_STATE_CMD_LPM;
+ }
+
+ samsung_dsim_write(dsi, DSIM_PKTHDR_REG, reg);
+
+ if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
+ samsung_dsim_force_bta(dsi);
+}
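
The switch at the end of samsung_dsim_send_to_fifo() packs the final one to three payload bytes little-endian into a single 32-bit FIFO word, mirroring the get_unaligned_le32() used for full words. A standalone sketch of that packing:

#include <stdio.h>
#include <stdint.h>

/* pack the last 1-3 payload bytes, LSB first, into one FIFO word */
static uint32_t pack_tail(const uint8_t *p, unsigned int len)
{
	uint32_t reg = 0;

	switch (len) {
	case 3: reg |= (uint32_t)p[2] << 16;	/* fallthrough */
	case 2: reg |= (uint32_t)p[1] << 8;	/* fallthrough */
	case 1: reg |= p[0];
	}
	return reg;
}

int main(void)
{
	const uint8_t tail[3] = { 0xaa, 0xbb, 0xcc };

	printf("0x%08x\n", pack_tail(tail, 3));	/* 0x00ccbbaa */
	return 0;
}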
+
+static void samsung_dsim_read_from_fifo(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ u8 *payload = xfer->rx_payload + xfer->rx_done;
+ bool first = !xfer->rx_done;
+ struct device *dev = dsi->dev;
+ u16 length;
+ u32 reg;
+
+ if (first) {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+
+ switch (reg & 0x3f) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ if (xfer->rx_len >= 2) {
+ payload[1] = reg >> 16;
+ ++xfer->rx_done;
+ }
+ fallthrough;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ payload[0] = reg >> 8;
+ ++xfer->rx_done;
+ xfer->rx_len = xfer->rx_done;
+ xfer->result = 0;
+ goto clear_fifo;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ dev_err(dev, "DSI Error Report: 0x%04x\n", (reg >> 8) & 0xffff);
+ xfer->result = 0;
+ goto clear_fifo;
+ }
+
+ length = (reg >> 8) & 0xffff;
+ if (length > xfer->rx_len) {
+ dev_err(dev,
+ "response too long (%u > %u bytes), stripping\n",
+ length, xfer->rx_len);
+ length = xfer->rx_len;
+ } else if (length < xfer->rx_len) {
+ xfer->rx_len = length;
+ }
+ }
+
+ length = xfer->rx_len - xfer->rx_done;
+ xfer->rx_done += length;
+
+ /* Receive payload */
+ while (length >= 4) {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+ payload[0] = (reg >> 0) & 0xff;
+ payload[1] = (reg >> 8) & 0xff;
+ payload[2] = (reg >> 16) & 0xff;
+ payload[3] = (reg >> 24) & 0xff;
+ payload += 4;
+ length -= 4;
+ }
+
+ if (length) {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+ switch (length) {
+ case 3:
+ payload[2] = (reg >> 16) & 0xff;
+ fallthrough;
+ case 2:
+ payload[1] = (reg >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ payload[0] = reg & 0xff;
+ }
+ }
+
+ if (xfer->rx_done == xfer->rx_len)
+ xfer->result = 0;
+
+clear_fifo:
+ length = DSI_RX_FIFO_SIZE / 4;
+ do {
+ reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
+ if (reg == DSI_RX_FIFO_EMPTY)
+ break;
+ } while (--length);
+}
+
+static void samsung_dsim_transfer_start(struct samsung_dsim *dsi)
+{
+ unsigned long flags;
+ struct samsung_dsim_transfer *xfer;
+ bool start = false;
+
+again:
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (list_empty(&dsi->transfer_list)) {
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ return;
+ }
+
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (xfer->packet.payload_length &&
+ xfer->tx_done == xfer->packet.payload_length)
+ /* waiting for RX */
+ return;
+
+ samsung_dsim_send_to_fifo(dsi, xfer);
+
+ if (xfer->packet.payload_length || xfer->rx_len)
+ return;
+
+ xfer->result = 0;
+ complete(&xfer->completed);
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (start)
+ goto again;
+}
+
+static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
+{
+ struct samsung_dsim_transfer *xfer;
+ unsigned long flags;
+ bool start = true;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (list_empty(&dsi->transfer_list)) {
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ return false;
+ }
+
+ xfer = list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ dev_dbg(dsi->dev,
+ "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+ xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
+ xfer->rx_done);
+
+ if (xfer->tx_done != xfer->packet.payload_length)
+ return true;
+
+ if (xfer->rx_done != xfer->rx_len)
+ samsung_dsim_read_from_fifo(dsi, xfer);
+
+ if (xfer->rx_done != xfer->rx_len)
+ return true;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (!xfer->rx_len)
+ xfer->result = 0;
+ complete(&xfer->completed);
+
+ return start;
+}
+
+static void samsung_dsim_remove_transfer(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ unsigned long flags;
+ bool start;
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ if (!list_empty(&dsi->transfer_list) &&
+ xfer == list_first_entry(&dsi->transfer_list,
+ struct samsung_dsim_transfer, list)) {
+ list_del_init(&xfer->list);
+ start = !list_empty(&dsi->transfer_list);
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+ if (start)
+ samsung_dsim_transfer_start(dsi);
+ return;
+ }
+
+ list_del_init(&xfer->list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+}
+
+static int samsung_dsim_transfer(struct samsung_dsim *dsi,
+ struct samsung_dsim_transfer *xfer)
+{
+ unsigned long flags;
+ bool stopped;
+
+ xfer->tx_done = 0;
+ xfer->rx_done = 0;
+ xfer->result = -ETIMEDOUT;
+ init_completion(&xfer->completed);
+
+ spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+ stopped = list_empty(&dsi->transfer_list);
+ list_add_tail(&xfer->list, &dsi->transfer_list);
+
+ spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+ if (stopped)
+ samsung_dsim_transfer_start(dsi);
+
+ wait_for_completion_timeout(&xfer->completed,
+ msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
+ if (xfer->result == -ETIMEDOUT) {
+ struct mipi_dsi_packet *pkt = &xfer->packet;
+
+ samsung_dsim_remove_transfer(dsi, xfer);
+ dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header,
+ (int)pkt->payload_length, pkt->payload);
+ return -ETIMEDOUT;
+ }
+
+ /* Also covers hardware timeout condition */
+ return xfer->result;
+}
+
+static irqreturn_t samsung_dsim_irq(int irq, void *dev_id)
+{
+ struct samsung_dsim *dsi = dev_id;
+ u32 status;
+
+ status = samsung_dsim_read(dsi, DSIM_INTSRC_REG);
+ if (!status) {
+ static unsigned long j;
+
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(dsi->dev, "spurious interrupt\n");
+ return IRQ_HANDLED;
+ }
+ samsung_dsim_write(dsi, DSIM_INTSRC_REG, status);
+
+ if (status & DSIM_INT_SW_RST_RELEASE) {
+ unsigned long mask = ~(DSIM_INT_RX_DONE |
+ DSIM_INT_SFR_FIFO_EMPTY |
+ DSIM_INT_SFR_HDR_FIFO_EMPTY |
+ DSIM_INT_RX_ECC_ERR |
+ DSIM_INT_SW_RST_RELEASE);
+ samsung_dsim_write(dsi, DSIM_INTMSK_REG, mask);
+ complete(&dsi->completed);
+ return IRQ_HANDLED;
+ }
+
+ if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
+ DSIM_INT_PLL_STABLE)))
+ return IRQ_HANDLED;
+
+ if (samsung_dsim_transfer_finish(dsi))
+ samsung_dsim_transfer_start(dsi);
+
+ return IRQ_HANDLED;
+}
+
+static void samsung_dsim_enable_irq(struct samsung_dsim *dsi)
+{
+ enable_irq(dsi->irq);
+
+ if (dsi->te_gpio)
+ enable_irq(gpiod_to_irq(dsi->te_gpio));
+}
+
+static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
+{
+ if (dsi->te_gpio)
+ disable_irq(gpiod_to_irq(dsi->te_gpio));
+
+ disable_irq(dsi->irq);
+}
+
+static int samsung_dsim_init(struct samsung_dsim *dsi)
+{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+
+ if (dsi->state & DSIM_STATE_INITIALIZED)
+ return 0;
+
+ samsung_dsim_reset(dsi);
+ samsung_dsim_enable_irq(dsi);
+
+ if (driver_data->reg_values[RESET_TYPE] == DSIM_FUNCRST)
+ samsung_dsim_enable_lane(dsi, BIT(dsi->lanes) - 1);
+
+ samsung_dsim_enable_clock(dsi);
+ if (driver_data->wait_for_reset)
+ samsung_dsim_wait_for_reset(dsi);
+ samsung_dsim_set_phy_ctrl(dsi);
+ samsung_dsim_init_link(dsi);
+
+ dsi->state |= DSIM_STATE_INITIALIZED;
+
+ return 0;
+}
+
+static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ if (dsi->state & DSIM_STATE_ENABLED)
+ return;
+
+ ret = pm_runtime_resume_and_get(dsi->dev);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to enable DSI device.\n");
+ return;
+ }
+
+ dsi->state |= DSIM_STATE_ENABLED;
+
+ /*
+ * For Exynos-DSIM, the downstream bridge or panel expects the host
+ * to be initialized during the DSI transfer, so defer it here.
+ */
+ if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
+ ret = samsung_dsim_init(dsi);
+ if (ret)
+ return;
+ }
+}
+
+static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ samsung_dsim_set_display_mode(dsi);
+ samsung_dsim_set_display_enable(dsi, true);
+
+ dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
+}
+
+static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return;
+
+ dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
+}
+
+static void samsung_dsim_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ samsung_dsim_set_display_enable(dsi, false);
+
+ dsi->state &= ~DSIM_STATE_ENABLED;
+ pm_runtime_put_sync(dsi->dev);
+}
+
+/*
+ * This list of pixel output formats is taken from
+ * AN13573 i.MX 8/RT MIPI DSI/CSI-2, Rev. 0, 21 March 2022
+ * 3.7.4 Pixel formats
+ * Table 14. DSI pixel packing formats
+ */
+static const u32 samsung_dsim_pixel_output_fmts[] = {
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YUYV12_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_RGB101010_1X30,
+ MEDIA_BUS_FMT_RGB121212_1X36,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RGB888_1X24,
+};
+
+static bool samsung_dsim_pixel_output_fmt_supported(u32 fmt)
+{
+ int i;
+
+ if (fmt == MEDIA_BUS_FMT_FIXED)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(samsung_dsim_pixel_output_fmts); i++) {
+ if (samsung_dsim_pixel_output_fmts[i] == fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+samsung_dsim_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ if (!samsung_dsim_pixel_output_fmt_supported(output_fmt))
+ /*
+ * Some bridge/display drivers are still not able to pass the
+ * correct format, so handle those pipelines by falling back
+ * to the default format until the set of supported formats is
+ * finalized.
+ */
+ output_fmt = MEDIA_BUS_FMT_RGB888_1X24;
+
+ input_fmts[0] = output_fmt;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int samsung_dsim_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+
+ /*
+ * The i.MX8M Mini/Nano glue logic between LCDIF and DSIM inverts
+ * the polarity of the HS/VS/DE sync signals. Therefore, although
+ * i.MX 8M Mini Applications Processor Reference Manual Rev. 3, 11/2020
+ * 13.6.3.5.2 RGB interface
+ * and
+ * i.MX 8M Nano Applications Processor Reference Manual Rev. 2, 07/2022
+ * 13.6.2.7.2 RGB interface
+ * both claim "Vsync, Hsync, and VDEN are active high signals.", the
+ * LCDIF must generate inverted HS/VS/DE signals, i.e. active LOW.
+ *
+ * The i.MX8M Plus glue logic between LCDIFv3 and DSIM does not
+ * invert these signals, therefore LCDIFv3 must generate HS/VS/DE
+ * signals active HIGH.
+ */
+ if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MM) {
+ adjusted_mode->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ } else if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MP) {
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+ adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ }
+
+ return 0;
+}
+
+static void samsung_dsim_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ drm_mode_copy(&dsi->mode, adjusted_mode);
+}
+
+static int samsung_dsim_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+ return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+ flags);
+}
+
+static const struct drm_bridge_funcs samsung_dsim_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_get_input_bus_fmts = samsung_dsim_atomic_get_input_bus_fmts,
+ .atomic_check = samsung_dsim_atomic_check,
+ .atomic_pre_enable = samsung_dsim_atomic_pre_enable,
+ .atomic_enable = samsung_dsim_atomic_enable,
+ .atomic_disable = samsung_dsim_atomic_disable,
+ .atomic_post_disable = samsung_dsim_atomic_post_disable,
+ .mode_set = samsung_dsim_mode_set,
+ .attach = samsung_dsim_attach,
+};
+
+static irqreturn_t samsung_dsim_te_irq_handler(int irq, void *dev_id)
+{
+ struct samsung_dsim *dsi = dev_id;
+ const struct samsung_dsim_plat_data *pdata = dsi->plat_data;
+
+ if (pdata->host_ops && pdata->host_ops->te_irq_handler)
+ return pdata->host_ops->te_irq_handler(dsi);
+
+ return IRQ_HANDLED;
+}
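
A hedged sketch of how a platform might use this hook (not part of this patch; the handler name and its body are hypothetical): the TE interrupt is simply routed through host_ops, so a glue driver only has to provide a callback with this signature.

static irqreturn_t my_soc_dsim_te_irq_handler(struct samsung_dsim *dsi)
{
	/* hypothetical: kick a pending command-mode update here */
	return IRQ_HANDLED;
}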
+
+static int samsung_dsim_register_te_irq(struct samsung_dsim *dsi, struct device *dev)
+{
+ int te_gpio_irq;
+ int ret;
+
+ dsi->te_gpio = devm_gpiod_get_optional(dev, "te", GPIOD_IN);
+ if (!dsi->te_gpio)
+ return 0;
+ if (IS_ERR(dsi->te_gpio))
+ return dev_err_probe(dev, PTR_ERR(dsi->te_gpio), "failed to get te GPIO\n");
+
+ te_gpio_irq = gpiod_to_irq(dsi->te_gpio);
+
+ ret = request_threaded_irq(te_gpio_irq, samsung_dsim_te_irq_handler, NULL,
+ IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
+ if (ret) {
+ dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
+ /* the GPIO is devm-managed; do not mix in a plain gpiod_put() */
+ devm_gpiod_put(dev, dsi->te_gpio);
+ dsi->te_gpio = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int samsung_dsim_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct samsung_dsim *dsi = host_to_dsi(host);
+ const struct samsung_dsim_plat_data *pdata = dsi->plat_data;
+ struct device *dev = dsi->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *remote;
+ struct drm_panel *panel;
+ int ret;
+
+ /*
+ * Devices can also be child nodes when we also control that device
+ * through the upstream device (i.e., MIPI-DCS for a MIPI-DSI device).
+ *
+ * Look for a child node of the given parent that isn't either port
+ * or ports.
+ */
+ for_each_available_child_of_node(np, remote) {
+ if (of_node_name_eq(remote, "port") ||
+ of_node_name_eq(remote, "ports"))
+ continue;
+
+ goto of_find_panel_or_bridge;
+ }
+
+ /*
+ * of_graph_get_remote_node() produces a noisy error message if the
+ * port node isn't found, and the absence of the port is a legitimate
+ * case here, so first silently check whether a graph is present in
+ * the device-tree node.
+ */
+ if (!of_graph_is_present(np))
+ return -ENODEV;
+
+ remote = of_graph_get_remote_node(np, 1, 0);
+
+of_find_panel_or_bridge:
+ if (!remote)
+ return -ENODEV;
+
+ panel = of_drm_find_panel(remote);
+ if (!IS_ERR(panel)) {
+ dsi->out_bridge = devm_drm_panel_bridge_add(dev, panel);
+ } else {
+ dsi->out_bridge = of_drm_find_bridge(remote);
+ if (!dsi->out_bridge)
+ dsi->out_bridge = ERR_PTR(-EINVAL);
+ }
+
+ of_node_put(remote);
+
+ if (IS_ERR(dsi->out_bridge)) {
+ ret = PTR_ERR(dsi->out_bridge);
+ DRM_DEV_ERROR(dev, "failed to find the bridge: %d\n", ret);
+ return ret;
+ }
+
+ DRM_DEV_INFO(dev, "Attached %s device\n", device->name);
+
+ drm_bridge_add(&dsi->bridge);
+
+ /*
+ * This is a temporary solution and should be replaced with a more
+ * generic mechanism.
+ *
+ * If the attached panel device operates in command mode, the DSI
+ * host should register a TE interrupt handler.
+ */
+ if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ ret = samsung_dsim_register_te_irq(dsi, &device->dev);
+ if (ret)
+ return ret;
+ }
+
+ if (pdata->host_ops && pdata->host_ops->attach) {
+ ret = pdata->host_ops->attach(dsi, device);
+ if (ret)
+ return ret;
+ }
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+
+ return 0;
+}
+
+static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi)
+{
+ /* the TE GPIO is devm-managed; only the IRQ must be freed here */
+ if (dsi->te_gpio)
+ free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
+}
+
+static int samsung_dsim_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct samsung_dsim *dsi = host_to_dsi(host);
+ const struct samsung_dsim_plat_data *pdata = dsi->plat_data;
+
+ dsi->out_bridge = NULL;
+
+ if (pdata->host_ops && pdata->host_ops->detach)
+ pdata->host_ops->detach(dsi, device);
+
+ samsung_dsim_unregister_te_irq(dsi);
+
+ drm_bridge_remove(&dsi->bridge);
+
+ return 0;
+}
+
+static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct samsung_dsim *dsi = host_to_dsi(host);
+ struct samsung_dsim_transfer xfer;
+ int ret;
+
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return -EINVAL;
+
+ ret = samsung_dsim_init(dsi);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0)
+ return ret;
+
+ xfer.rx_len = msg->rx_len;
+ xfer.rx_payload = msg->rx_buf;
+ xfer.flags = msg->flags;
+
+ ret = samsung_dsim_transfer(dsi, &xfer);
+ return (ret < 0) ? ret : xfer.rx_done;
+}
+
+static const struct mipi_dsi_host_ops samsung_dsim_ops = {
+ .attach = samsung_dsim_host_attach,
+ .detach = samsung_dsim_host_detach,
+ .transfer = samsung_dsim_host_transfer,
+};
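
For context, a hedged sketch (not part of this patch; the panel function is hypothetical) of how a downstream peripheral exercises these ops: the standard DCS helpers build a struct mipi_dsi_msg that the MIPI-DSI core hands to samsung_dsim_host_transfer() above.

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

static int panel_exit_sleep(struct mipi_dsi_device *panel)
{
	ssize_t err;

	/* DCS exit_sleep_mode: one command byte, no payload, no read-back */
	err = mipi_dsi_dcs_write(panel, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);

	return err < 0 ? err : 0;
}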
+
+static int samsung_dsim_of_read_u32(const struct device_node *np,
+ const char *propname, u32 *out_value)
+{
+ int ret = of_property_read_u32(np, propname, out_value);
+
+ if (ret < 0)
+ pr_err("%pOF: failed to get '%s' property\n", np, propname);
+
+ return ret;
+}
+
+static int samsung_dsim_parse_dt(struct samsung_dsim *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ ret = samsung_dsim_of_read_u32(node, "samsung,pll-clock-frequency",
+ &dsi->pll_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ ret = samsung_dsim_of_read_u32(node, "samsung,burst-clock-frequency",
+ &dsi->burst_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ ret = samsung_dsim_of_read_u32(node, "samsung,esc-clock-frequency",
+ &dsi->esc_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
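
The three properties read above come straight from the device tree; a hedged example node follows, with illustrative values and node name that are not taken from this patch:

/*
 *	dsi@32e60000 {
 *		compatible = "fsl,imx8mm-mipi-dsim";
 *		samsung,pll-clock-frequency = <12000000>;
 *		samsung,burst-clock-frequency = <891000000>;
 *		samsung,esc-clock-frequency = <54000000>;
 *	};
 */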
+
+static int generic_dsim_register_host(struct samsung_dsim *dsi)
+{
+ return mipi_dsi_host_register(&dsi->dsi_host);
+}
+
+static void generic_dsim_unregister_host(struct samsung_dsim *dsi)
+{
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+}
+
+static const struct samsung_dsim_host_ops generic_dsim_host_ops = {
+ .register_host = generic_dsim_register_host,
+ .unregister_host = generic_dsim_unregister_host,
+};
+
+static const struct drm_bridge_timings samsung_dsim_bridge_timings_de_high = {
+ .input_bus_flags = DRM_BUS_FLAG_DE_HIGH,
+};
+
+static const struct drm_bridge_timings samsung_dsim_bridge_timings_de_low = {
+ .input_bus_flags = DRM_BUS_FLAG_DE_LOW,
+};
+
+int samsung_dsim_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct samsung_dsim *dsi;
+ int ret, i;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ init_completion(&dsi->completed);
+ spin_lock_init(&dsi->transfer_lock);
+ INIT_LIST_HEAD(&dsi->transfer_list);
+
+ dsi->dsi_host.ops = &samsung_dsim_ops;
+ dsi->dsi_host.dev = dev;
+
+ dsi->dev = dev;
+ dsi->plat_data = of_device_get_match_data(dev);
+ dsi->driver_data = samsung_dsim_types[dsi->plat_data->hw_type];
+
+ dsi->supplies[0].supply = "vddcore";
+ dsi->supplies[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
+ dsi->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks,
+ sizeof(*dsi->clks), GFP_KERNEL);
+ if (!dsi->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < dsi->driver_data->num_clks; i++) {
+ dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
+ if (IS_ERR(dsi->clks[i])) {
+ if (strcmp(clk_names[i], "sclk_mipi") == 0) {
+ dsi->clks[i] = devm_clk_get(dev, OLD_SCLK_MIPI_CLK_NAME);
+ if (!IS_ERR(dsi->clks[i]))
+ continue;
+ }
+
+ dev_info(dev, "failed to get the clock: %s\n", clk_names[i]);
+ return PTR_ERR(dsi->clks[i]);
+ }
+ }
+
+ dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dsi->reg_base))
+ return PTR_ERR(dsi->reg_base);
+
+ dsi->phy = devm_phy_optional_get(dev, "dsim");
+ if (IS_ERR(dsi->phy)) {
+ dev_info(dev, "failed to get dsim phy\n");
+ return PTR_ERR(dsi->phy);
+ }
+
+ dsi->irq = platform_get_irq(pdev, 0);
+ if (dsi->irq < 0)
+ return dsi->irq;
+
+ ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
+ samsung_dsim_irq,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ dev_name(dev), dsi);
+ if (ret) {
+ dev_err(dev, "failed to request dsi irq\n");
+ return ret;
+ }
+
+ ret = samsung_dsim_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, dsi);
+
+ pm_runtime_enable(dev);
+
+ dsi->bridge.funcs = &samsung_dsim_bridge_funcs;
+ dsi->bridge.of_node = dev->of_node;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+
+ /* DE_LOW: i.MX8M Mini/Nano LCDIF-DSIM glue logic inverts HS/VS/DE */
+ if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MM)
+ dsi->bridge.timings = &samsung_dsim_bridge_timings_de_low;
+ else
+ dsi->bridge.timings = &samsung_dsim_bridge_timings_de_high;
+
+ if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->register_host) {
+ ret = dsi->plat_data->host_ops->register_host(dsi);
+ if (ret)
+ goto err_disable_runtime;
+ }
+
+ return 0;
+
+err_disable_runtime:
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(samsung_dsim_probe);
+
+int samsung_dsim_remove(struct platform_device *pdev)
+{
+ struct samsung_dsim *dsi = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->unregister_host)
+ dsi->plat_data->host_ops->unregister_host(dsi);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(samsung_dsim_remove);
+
+static int __maybe_unused samsung_dsim_suspend(struct device *dev)
+{
+ struct samsung_dsim *dsi = dev_get_drvdata(dev);
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ int ret, i;
+
+ usleep_range(10000, 20000);
+
+ if (dsi->state & DSIM_STATE_INITIALIZED) {
+ dsi->state &= ~DSIM_STATE_INITIALIZED;
+
+ samsung_dsim_disable_clock(dsi);
+
+ samsung_dsim_disable_irq(dsi);
+ }
+
+ dsi->state &= ~DSIM_STATE_CMD_LPM;
+
+ phy_power_off(dsi->phy);
+
+ for (i = driver_data->num_clks - 1; i > -1; i--)
+ clk_disable_unprepare(dsi->clks[i]);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0)
+ dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
+
+ return 0;
+}
+
+static int __maybe_unused samsung_dsim_resume(struct device *dev)
+{
+ struct samsung_dsim *dsi = dev_get_drvdata(dev);
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+ int ret, i;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < driver_data->num_clks; i++) {
+ ret = clk_prepare_enable(dsi->clks[i]);
+ if (ret < 0)
+ goto err_clk;
+ }
+
+ ret = phy_power_on(dsi->phy);
+ if (ret < 0) {
+ dev_err(dsi->dev, "cannot enable phy %d\n", ret);
+ goto err_clk;
+ }
+
+ return 0;
+
+err_clk:
+ while (--i > -1)
+ clk_disable_unprepare(dsi->clks[i]);
+ regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+
+ return ret;
+}
+
+const struct dev_pm_ops samsung_dsim_pm_ops = {
+ SET_RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+EXPORT_SYMBOL_GPL(samsung_dsim_pm_ops);
+
+static const struct samsung_dsim_plat_data samsung_dsim_imx8mm_pdata = {
+ .hw_type = DSIM_TYPE_IMX8MM,
+ .host_ops = &generic_dsim_host_ops,
+};
+
+static const struct samsung_dsim_plat_data samsung_dsim_imx8mp_pdata = {
+ .hw_type = DSIM_TYPE_IMX8MP,
+ .host_ops = &generic_dsim_host_ops,
+};
+
+static const struct of_device_id samsung_dsim_of_match[] = {
+ {
+ .compatible = "fsl,imx8mm-mipi-dsim",
+ .data = &samsung_dsim_imx8mm_pdata,
+ },
+ {
+ .compatible = "fsl,imx8mp-mipi-dsim",
+ .data = &samsung_dsim_imx8mp_pdata,
+ },
+ { /* sentinel. */ }
+};
+MODULE_DEVICE_TABLE(of, samsung_dsim_of_match);
+
+static struct platform_driver samsung_dsim_driver = {
+ .probe = samsung_dsim_probe,
+ .remove = samsung_dsim_remove,
+ .driver = {
+ .name = "samsung-dsim",
+ .pm = &samsung_dsim_pm_ops,
+ .of_match_table = samsung_dsim_of_match,
+ },
+};
+
+module_platform_driver(samsung_dsim_driver);
+
+MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
+MODULE_DESCRIPTION("Samsung MIPI DSIM controller bridge");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 099b510ff285..2d17f227867b 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -867,11 +867,6 @@ static int sii9234_init_resources(struct sii9234 *ctx,
return 0;
}
-static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge)
-{
- return container_of(bridge, struct sii9234, bridge);
-}
-
static enum drm_mode_status sii9234_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index 2c5c5211bdab..d85d9ee463b8 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -202,11 +202,9 @@ static int simple_bridge_probe(struct platform_device *pdev)
sbridge->enable = devm_gpiod_get_optional(&pdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(sbridge->enable)) {
- if (PTR_ERR(sbridge->enable) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Unable to retrieve enable GPIO\n");
- return PTR_ERR(sbridge->enable);
- }
+ if (IS_ERR(sbridge->enable))
+ return dev_err_probe(&pdev->dev, PTR_ERR(sbridge->enable),
+ "Unable to retrieve enable GPIO\n");
/* Register the bridge. */
sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
@@ -218,13 +216,11 @@ static int simple_bridge_probe(struct platform_device *pdev)
return 0;
}
-static int simple_bridge_remove(struct platform_device *pdev)
+static void simple_bridge_remove(struct platform_device *pdev)
{
struct simple_bridge *sbridge = platform_get_drvdata(pdev);
drm_bridge_remove(&sbridge->bridge);
-
- return 0;
}
/*
@@ -301,7 +297,7 @@ MODULE_DEVICE_TABLE(of, simple_bridge_match);
static struct platform_driver simple_bridge_driver = {
.probe = simple_bridge_probe,
- .remove = simple_bridge_remove,
+ .remove_new = simple_bridge_remove,
.driver = {
.name = "simple-bridge",
.of_match_table = simple_bridge_match,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 4efb62bcdb63..67b8d17a722a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -584,13 +584,11 @@ err:
return ret;
}
-static int snd_dw_hdmi_remove(struct platform_device *pdev)
+static void snd_dw_hdmi_remove(struct platform_device *pdev)
{
struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);
snd_card_free(dw->card);
-
- return 0;
}
#if defined(CONFIG_PM_SLEEP) && defined(IS_NOT_BROKEN)
@@ -625,7 +623,7 @@ static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend,
static struct platform_driver snd_dw_hdmi_driver = {
.probe = snd_dw_hdmi_probe,
- .remove = snd_dw_hdmi_remove,
+ .remove_new = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
.pm = PM_OPS,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index c8f44bcb298a..9389ce526eb1 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -296,19 +296,17 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
return 0;
}
-static int dw_hdmi_cec_remove(struct platform_device *pdev)
+static void dw_hdmi_cec_remove(struct platform_device *pdev)
{
struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
cec_unregister_adapter(cec->adap);
-
- return 0;
}
static struct platform_driver dw_hdmi_cec_driver = {
.probe = dw_hdmi_cec_probe,
- .remove = dw_hdmi_cec_remove,
+ .remove_new = dw_hdmi_cec_remove,
.driver = {
.name = "dw-hdmi-cec",
},
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
index 557966239677..423762da2ab4 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
@@ -172,18 +172,16 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(dw->audio_pdev);
}
-static int snd_dw_hdmi_remove(struct platform_device *pdev)
+static void snd_dw_hdmi_remove(struct platform_device *pdev)
{
struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);
platform_device_unregister(dw->audio_pdev);
-
- return 0;
}
static struct platform_driver snd_dw_hdmi_driver = {
.probe = snd_dw_hdmi_probe,
- .remove = snd_dw_hdmi_remove,
+ .remove_new = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index d751820c6da6..26c187d20d97 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -216,18 +216,16 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int snd_dw_hdmi_remove(struct platform_device *pdev)
+static void snd_dw_hdmi_remove(struct platform_device *pdev)
{
struct platform_device *platform = dev_get_drvdata(&pdev->dev);
platform_device_unregister(platform);
-
- return 0;
}
static struct platform_driver snd_dw_hdmi_driver = {
.probe = snd_dw_hdmi_probe,
- .remove = snd_dw_hdmi_remove,
+ .remove_new = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 0b6a28436885..77f7f7f54757 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -229,6 +229,7 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
ctx->bridge.funcs = &tc358762_bridge_funcs;
ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
ctx->bridge.of_node = dev->of_node;
+ ctx->bridge.pre_enable_prev_first = true;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index e21078b2f8b5..d4c1a601bbb5 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -230,13 +230,11 @@ static int thc63_probe(struct platform_device *pdev)
return 0;
}
-static int thc63_remove(struct platform_device *pdev)
+static void thc63_remove(struct platform_device *pdev)
{
struct thc63_dev *thc63 = platform_get_drvdata(pdev);
drm_bridge_remove(&thc63->bridge);
-
- return 0;
}
static const struct of_device_id thc63_match[] = {
@@ -247,7 +245,7 @@ MODULE_DEVICE_TABLE(of, thc63_match);
static struct platform_driver thc63_driver = {
.probe = thc63_probe,
- .remove = thc63_remove,
+ .remove_new = thc63_remove,
.driver = {
.name = "thc63lvd1024",
.of_match_table = thc63_match,
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 6db69df0e18b..ab63225cd635 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -355,11 +355,9 @@ static int tfp410_probe(struct platform_device *pdev)
return tfp410_init(&pdev->dev, false);
}
-static int tfp410_remove(struct platform_device *pdev)
+static void tfp410_remove(struct platform_device *pdev)
{
tfp410_fini(&pdev->dev);
-
- return 0;
}
static const struct of_device_id tfp410_match[] = {
@@ -370,7 +368,7 @@ MODULE_DEVICE_TABLE(of, tfp410_match);
static struct platform_driver tfp410_platform_driver = {
.probe = tfp410_probe,
- .remove = tfp410_remove,
+ .remove_new = tfp410_remove,
.driver = {
.name = "tfp410-bridge",
.of_match_table = tfp410_match,
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 28e3f2c8917e..d4d2a2ce40f8 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2739,6 +2739,11 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_disable(plane, old_state);
} else if (new_plane_state->crtc || disabling) {
funcs->atomic_update(plane, old_state);
+
+ if (!disabling && funcs->atomic_enable) {
+ if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
+ funcs->atomic_enable(plane, old_state);
+ }
}
}
@@ -2799,6 +2804,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
struct drm_plane_state *new_plane_state =
drm_atomic_get_new_plane_state(old_state, plane);
const struct drm_plane_helper_funcs *plane_funcs;
+ bool disabling;
plane_funcs = plane->helper_private;
@@ -2808,12 +2814,18 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
WARN_ON(new_plane_state->crtc &&
new_plane_state->crtc != crtc);
- if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
- plane_funcs->atomic_disable)
+ disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);
+
+ if (disabling && plane_funcs->atomic_disable) {
plane_funcs->atomic_disable(plane, old_state);
- else if (new_plane_state->crtc ||
- drm_atomic_plane_disabling(old_plane_state, new_plane_state))
+ } else if (new_plane_state->crtc || disabling) {
plane_funcs->atomic_update(plane, old_state);
+
+ if (!disabling && plane_funcs->atomic_enable) {
+ if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
+ plane_funcs->atomic_enable(plane, old_state);
+ }
+ }
}
if (crtc_funcs && crtc_funcs->atomic_flush)
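
A hedged sketch of what the new call sites above enable (callback names hypothetical, not part of this patch): plane drivers can now split one-time enable programming out of the per-commit update path. Per the hunks above, on an off-to-on transition atomic_enable runs after atomic_update.

static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
	.atomic_check = my_plane_atomic_check,
	/* runs for every commit that leaves the plane on a CRTC */
	.atomic_update = my_plane_atomic_update,
	/* runs only on an off-to-on transition, after atomic_update */
	.atomic_enable = my_plane_atomic_enable,
	/* runs only on an on-to-off transition */
	.atomic_disable = my_plane_atomic_disable,
};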
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9d0250c28e9b..48df7a5ea503 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,9 +33,11 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
-#include <linux/fb.h>
+#include <linux/property.h>
#include <linux/uaccess.h>
+#include <video/cmdline.h>
+
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -154,9 +156,10 @@ EXPORT_SYMBOL(drm_get_connector_type_name);
static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *mode = &connector->cmdline_mode;
- char *option = NULL;
+ const char *option;
- if (fb_get_options(connector->name, &option))
+ option = video_get_options(connector->name);
+ if (!option)
return;
if (!drm_mode_parse_command_line_for_connector(option,
@@ -1446,6 +1449,20 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* a firmware handled hotkey. Therefor userspace must not include the
* privacy-screen sw-state in an atomic commit unless it wants to change
* its value.
+ *
+ * left margin, right margin, top margin, bottom margin:
+ * Add margins to the connector's viewport. This is typically used to
+ * mitigate overscan on TVs.
+ *
+ * The value is the size in pixels of the black border which will be
+ * added. The attached CRTC's content will be scaled to fill the whole
+ * area inside the margin.
+ *
+ * The margins configuration might be sent to the sink, e.g. via HDMI AVI
+ * InfoFrames.
+ *
+ * Drivers can set up these properties by calling
+ * drm_mode_create_tv_margin_properties().
*/
int drm_connector_create_standard_properties(struct drm_device *dev)
@@ -1590,10 +1607,6 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
/*
* TODO: Document the properties:
- * - left margin
- * - right margin
- * - top margin
- * - bottom margin
* - brightness
* - contrast
* - flicker reduction
@@ -1602,7 +1615,6 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
* - overscan
* - saturation
* - select subconnector
- * - subconnector
*/
/**
* DOC: Analog TV Connector Properties
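
A hedged sketch of a driver using the margin properties documented above (the init function is hypothetical; the two drm_* calls are the existing helpers named in the new documentation):

static int my_tv_connector_init(struct drm_connector *connector)
{
	int ret;

	/* create the shared left/right/top/bottom margin properties */
	ret = drm_mode_create_tv_margin_properties(connector->dev);
	if (ret)
		return ret;

	/* attach them to this connector, all margins initially 0 */
	drm_connector_attach_tv_margin_properties(connector);

	return 0;
}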
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 4f643a490dc3..4855230ba2c6 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -80,7 +80,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
seq_printf(m,
"%20s %5s %3s master a %5s %10s\n",
"command",
- "pid",
+ "tgid",
"dev",
"uid",
"magic");
@@ -94,7 +94,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
bool is_current_master = drm_is_current_master(priv);
rcu_read_lock(); /* locks pid_task()->comm */
- task = pid_task(priv->pid, PIDTYPE_PID);
+ task = pid_task(priv->pid, PIDTYPE_TGID);
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
task ? task->comm : "<unknown>",
diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
index 38ea8203df45..9edc111be7ee 100644
--- a/drivers/gpu/drm/drm_displayid.c
+++ b/drivers/gpu/drm/drm_displayid.c
@@ -7,13 +7,29 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
-static int validate_displayid(const u8 *displayid, int length, int idx)
+static const struct displayid_header *
+displayid_get_header(const u8 *displayid, int length, int index)
+{
+ const struct displayid_header *base;
+
+ if (sizeof(*base) > length - index)
+ return ERR_PTR(-EINVAL);
+
+ base = (const struct displayid_header *)&displayid[index];
+
+ return base;
+}
+
+static const struct displayid_header *
+validate_displayid(const u8 *displayid, int length, int idx)
{
int i, dispid_length;
u8 csum = 0;
const struct displayid_header *base;
- base = (const struct displayid_header *)&displayid[idx];
+ base = displayid_get_header(displayid, length, idx);
+ if (IS_ERR(base))
+ return base;
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
@@ -21,16 +37,16 @@ static int validate_displayid(const u8 *displayid, int length, int idx)
/* +1 for DispID checksum */
dispid_length = sizeof(*base) + base->bytes + 1;
if (dispid_length > length - idx)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < dispid_length; i++)
csum += displayid[idx + i];
if (csum) {
DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
- return 0;
+ return base;
}
static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
@@ -39,7 +55,6 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
{
const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index);
const struct displayid_header *base;
- int ret;
if (!displayid)
return NULL;
@@ -48,11 +63,10 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
*length = EDID_LENGTH - 1;
*idx = 1;
- ret = validate_displayid(displayid, *length, *idx);
- if (ret)
+ base = validate_displayid(displayid, *length, *idx);
+ if (IS_ERR(base))
return NULL;
- base = (const struct displayid_header *)&displayid[*idx];
*length = *idx + sizeof(*base) + base->bytes;
return displayid;
@@ -109,6 +123,9 @@ __displayid_iter_next(struct displayid_iter *iter)
}
for (;;) {
+ /* The first section we encounter is the base section */
+ bool base_section = !iter->section;
+
iter->section = drm_find_displayid_extension(iter->drm_edid,
&iter->length,
&iter->idx,
@@ -118,6 +135,18 @@ __displayid_iter_next(struct displayid_iter *iter)
return NULL;
}
+ /* Save the structure version and primary use case. */
+ if (base_section) {
+ const struct displayid_header *base;
+
+ base = displayid_get_header(iter->section, iter->length,
+ iter->idx);
+ if (!IS_ERR(base)) {
+ iter->version = base->rev;
+ iter->primary_use = base->prod_id;
+ }
+ }
+
iter->idx += sizeof(struct displayid_header);
block = displayid_iter_block(iter);
@@ -130,3 +159,18 @@ void displayid_iter_end(struct displayid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
}
+
+/* DisplayID Structure Version/Revision from the Base Section. */
+u8 displayid_version(const struct displayid_iter *iter)
+{
+ return iter->version;
+}
+
+/*
+ * DisplayID Primary Use Case (2.0+) or Product Type Identifier (1.0-1.3) from
+ * the Base Section.
+ */
+u8 displayid_primary_use(const struct displayid_iter *iter)
+{
+ return iter->primary_use;
+}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index c6eb8972451a..cee0cc522ed9 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -691,9 +691,11 @@ static int drm_dev_init(struct drm_device *dev,
}
}
- ret = drm_dev_set_unique(dev, dev_name(parent));
- if (ret)
+ dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL);
+ if (!dev->unique) {
+ ret = -ENOMEM;
goto err;
+ }
return 0;
@@ -1000,26 +1002,6 @@ void drm_dev_unregister(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unregister);
-/**
- * drm_dev_set_unique - Set the unique name of a DRM device
- * @dev: device of which to set the unique name
- * @name: unique name
- *
- * Sets the unique name of a DRM device using the specified string. This is
- * already done by drm_dev_init(), drivers should only override the default
- * unique name for backwards compatibility reasons.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_dev_set_unique(struct drm_device *dev, const char *name)
-{
- drmm_kfree(dev, dev->unique);
- dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-EXPORT_SYMBOL(drm_dev_set_unique);
-
/*
* DRM Core
* The DRM core module initializes all global DRM objects and makes them
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index ad17fa21cebb..70032bba1c97 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -139,10 +139,7 @@ int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
if (!dev->driver->dumb_create)
return -ENOSYS;
- if (dev->driver->dumb_destroy)
- return dev->driver->dumb_destroy(file_priv, dev, handle);
- else
- return drm_gem_dumb_destroy(file_priv, dev, handle);
+ return drm_gem_handle_delete(file_priv, handle);
}
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3d0a4da661bc..c18ec866678d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3424,10 +3424,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
connector->base.id, connector->name);
return NULL;
}
- if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
- connector->base.id, connector->name);
- }
/* it is incorrect if hsync/vsync width is zero */
if (!hsync_pulse_width || !vsync_pulse_width) {
@@ -3474,10 +3470,27 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
- mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ switch (pt->misc & DRM_EDID_PT_SYNC_MASK) {
+ case DRM_EDID_PT_ANALOG_CSYNC:
+ case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC:
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n",
+ connector->base.id, connector->name);
+ mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC;
+ break;
+ case DRM_EDID_PT_DIGITAL_CSYNC:
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n",
+ connector->base.id, connector->name);
+ mode->flags |= DRM_MODE_FLAG_CSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC;
+ break;
+ case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC:
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ break;
+ }
}
set_size:
@@ -6433,6 +6446,29 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->quirks = 0;
}
+static void update_displayid_info(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
+{
+ struct drm_display_info *info = &connector->display_info;
+ const struct displayid_block *block;
+ struct displayid_iter iter;
+
+ displayid_iter_edid_begin(drm_edid, &iter);
+ displayid_iter_for_each(block, &iter) {
+ if (displayid_version(&iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
+ (displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_VR ||
+ displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_AR))
+ info->non_desktop = true;
+
+ /*
+ * We're only interested in the base section here, no need to
+ * iterate further.
+ */
+ break;
+ }
+ displayid_iter_end(&iter);
+}
+
static void update_display_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -6463,6 +6499,8 @@ static void update_display_info(struct drm_connector *connector,
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
drm_parse_cea_ext(connector, drm_edid);
+ update_displayid_info(connector, drm_edid);
+
/*
* Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
*
@@ -7242,6 +7280,15 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
}
}
+static bool displayid_is_tiled_block(const struct displayid_iter *iter,
+ const struct displayid_block *block)
+{
+ return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
+ block->tag == DATA_BLOCK_TILED_DISPLAY) ||
+ (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
+ block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
+}
+
static void _drm_update_tile_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -7252,7 +7299,7 @@ static void _drm_update_tile_info(struct drm_connector *connector,
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
- if (block->tag == DATA_BLOCK_TILED_DISPLAY)
+ if (displayid_is_tiled_block(&iter, block))
drm_parse_tiled_block(connector, block);
}
displayid_iter_end(&iter);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a39998047f8a..63ec95e86d0e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -60,16 +60,17 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
* In order to keep user-space compatibility, we want in certain use-cases
* to keep leaking the fbdev physical address to the user-space program
* handling the fbdev buffer.
- * This is a bad habit essentially kept into closed source opengl driver
- * that should really be moved into open-source upstream projects instead
- * of using legacy physical addresses in user space to communicate with
- * other out-of-tree kernel modules.
+ *
+ * This is a bad habit, essentially kept to support closed-source OpenGL
+ * drivers that should really be moved into open-source upstream projects
+ * instead of using legacy physical addresses in user space to communicate
+ * with other out-of-tree kernel modules.
*
* This module_param *should* be removed as soon as possible and be
* considered as a broken and legacy behaviour from a modern fbdev device.
*/
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
static bool drm_leak_fbdev_smem;
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
MODULE_PARM_DESC(drm_leak_fbdev_smem,
"Allow unsafe leaking fbdev physical smem address [default=false]");
@@ -539,6 +540,29 @@ err_release:
EXPORT_SYMBOL(drm_fb_helper_alloc_info);
/**
+ * drm_fb_helper_release_info - release fb_info and its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to release fb_info and its cmap member. Drivers do not
+ * need to release the allocated fb_info structure themselves; this is
+ * done automatically when calling drm_fb_helper_fini().
+ */
+void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
+{
+ struct fb_info *info = fb_helper->info;
+
+ if (!info)
+ return;
+
+ fb_helper->info = NULL;
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+}
+EXPORT_SYMBOL(drm_fb_helper_release_info);
+
+/**
* drm_fb_helper_unregister_info - unregister fb_info framebuffer device
* @fb_helper: driver-allocated fbdev helper, can be NULL
*
@@ -561,8 +585,6 @@ EXPORT_SYMBOL(drm_fb_helper_unregister_info);
*/
void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
{
- struct fb_info *info;
-
if (!fb_helper)
return;
@@ -574,13 +596,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
cancel_work_sync(&fb_helper->resume_work);
cancel_work_sync(&fb_helper->damage_work);
- info = fb_helper->info;
- if (info) {
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
- fb_helper->info = NULL;
+ drm_fb_helper_release_info(fb_helper);
mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
@@ -657,7 +673,7 @@ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off,
void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist)
{
struct drm_fb_helper *helper = info->par;
- unsigned long start, end, min_off, max_off;
+ unsigned long start, end, min_off, max_off, total_size;
struct fb_deferred_io_pageref *pageref;
struct drm_rect damage_area;
@@ -675,7 +691,11 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli
* of the screen and account for non-existing scanlines. Hence,
* keep the covered memory area within the screen buffer.
*/
- max_off = min(max_off, info->screen_size);
+ if (info->screen_size)
+ total_size = info->screen_size;
+ else
+ total_size = info->fix.smem_len;
+ max_off = min(max_off, total_size);
if (min_off < max_off) {
drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area);
@@ -1964,10 +1984,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
return ret;
}
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- fb_helper->hint_leak_smem_start = drm_leak_fbdev_smem;
-#endif
-
/* push down into drivers */
ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
if (ret < 0)
@@ -2166,11 +2182,8 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
info = fb_helper->info;
info->var.pixclock = 0;
- /* Shamelessly allow physical address leaking to userspace */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- if (!fb_helper->hint_leak_smem_start)
-#endif
- /* don't leak any physical addresses to userspace */
+
+ if (!drm_leak_fbdev_smem)
info->flags |= FBINFO_HIDE_SMEM_START;
/* Need to drop locks to avoid recursive deadlock in
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
new file mode 100644
index 000000000000..728deffcc0d9
--- /dev/null
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+
+#include <drm/drm_fbdev_dma.h>
+
+/*
+ * struct fb_ops
+ */
+
+static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ /* No need to take a ref for fbcon because it unbinds on unregister */
+ if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (user)
+ module_put(fb_helper->dev->driver->fops->owner);
+
+ return 0;
+}
+
+static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ if (!fb_helper->dev)
+ return;
+
+ drm_fb_helper_fini(fb_helper);
+
+ drm_client_buffer_vunmap(fb_helper->buffer);
+ drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+}
+
+static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+
+ if (drm_WARN_ON_ONCE(dev, !fb_helper->dev->driver->gem_prime_mmap))
+ return -ENODEV;
+
+ return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
+}
+
+static const struct fb_ops drm_fbdev_dma_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = drm_fbdev_dma_fb_open,
+ .fb_release = drm_fbdev_dma_fb_release,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_destroy = drm_fbdev_dma_fb_destroy,
+ .fb_mmap = drm_fbdev_dma_fb_mmap,
+};
+
+/*
+ * struct drm_fb_helper
+ */
+
+static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_client_buffer *buffer;
+ struct drm_gem_dma_object *dma_obj;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ u32 format;
+ struct iosys_map map;
+ int ret;
+
+ drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+ buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ sizes->surface_height, format);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+ dma_obj = to_drm_gem_dma_obj(buffer->gem);
+
+ fb = buffer->fb;
+ if (drm_WARN_ON(dev, fb->funcs->dirty)) {
+ ret = -ENODEV; /* damage handling not supported; use generic emulation */
+ goto err_drm_client_buffer_delete;
+ }
+
+ ret = drm_client_buffer_vmap(buffer, &map);
+ if (ret) {
+ goto err_drm_client_buffer_delete;
+ } else if (drm_WARN_ON(dev, map.is_iomem)) {
+ ret = -ENODEV; /* I/O memory not supported; use generic emulation */
+ goto err_drm_client_buffer_delete;
+ }
+
+ fb_helper->buffer = buffer;
+ fb_helper->fb = buffer->fb;
+
+ info = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_drm_client_buffer_vunmap;
+ }
+
+ drm_fb_helper_fill_info(info, fb_helper, sizes);
+
+ info->fbops = &drm_fbdev_dma_fb_ops;
+ info->flags = FBINFO_DEFAULT;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ if (dma_obj->map_noncoherent)
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_size = sizes->surface_height * fb->pitches[0];
+ info->screen_buffer = map.vaddr;
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ info->fix.smem_len = info->screen_size;
+
+ return 0;
+
+err_drm_client_buffer_vunmap:
+ fb_helper->fb = NULL;
+ fb_helper->buffer = NULL;
+ drm_client_buffer_vunmap(buffer);
+err_drm_client_buffer_delete:
+ drm_client_framebuffer_delete(buffer);
+ return ret;
+}
+
+static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
+ .fb_probe = drm_fbdev_dma_helper_fb_probe,
+};
+
+/*
+ * struct drm_client_funcs
+ */
+
+static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->info) {
+ drm_fb_helper_unregister_info(fb_helper);
+ } else {
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+ }
+}
+
+static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
+{
+ drm_fb_helper_lastclose(client->dev);
+
+ return 0;
+}
+
+static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
+
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err_drm_err;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(fb_helper);
+ if (ret)
+ goto err_drm_fb_helper_fini;
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
+ drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
+ return ret;
+}
+
+static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = drm_fbdev_dma_client_unregister,
+ .restore = drm_fbdev_dma_client_restore,
+ .hotplug = drm_fbdev_dma_client_hotplug,
+};
+
+/**
+ * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ *
+ * This function sets up fbdev emulation for GEM DMA drivers that support
+ * dumb buffers with a virtual address and that can be mmap'ed.
+ * drm_fbdev_dma_setup() shall be called after the DRM driver registered
+ * the new DRM device with drm_dev_register().
+ *
+ * Restore, hotplug events and teardown are all taken care of. Drivers that do
+ * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
+ * Simple drivers might use drm_mode_config_helper_suspend().
+ *
+ * This function is safe to call even when there are no connectors present.
+ * Setup will be retried on the next hotplug event.
+ *
+ * The fbdev is destroyed by drm_dev_unregister().
+ */
+void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+ drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
+
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return;
+ drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);
+
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
+ if (ret) {
+ drm_err(dev, "Failed to register client: %d\n", ret);
+ goto err_drm_client_init;
+ }
+
+ ret = drm_fbdev_dma_client_hotplug(&fb_helper->client);
+ if (ret)
+ drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+
+ drm_client_register(&fb_helper->client);
+
+ return;
+
+err_drm_client_init:
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+}
+EXPORT_SYMBOL(drm_fbdev_dma_setup);
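
A hedged usage sketch (driver structure and names hypothetical, not part of this patch): a GEM DMA driver enables the new emulation once its device is registered, as the kernel-doc above requires.

struct my_drv_device {
	struct drm_device drm;	/* embedded DRM device */
};

static int my_drv_probe(struct platform_device *pdev)
{
	struct my_drv_device *priv;
	int ret;

	priv = devm_drm_dev_alloc(&pdev->dev, &my_drv_driver,
				  struct my_drv_device, drm);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	/* hypothetical: modeset init of planes/CRTCs/encoders goes here */

	ret = drm_dev_register(&priv->drm, 0);
	if (ret)
		return ret;

	/* 32 bpp preferred; 0 would use dev->mode_config.preferred_depth */
	drm_fbdev_dma_setup(&priv->drm, 32);

	return 0;
}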
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index 4d6325e91565..8e5148bf40bb 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -7,22 +7,13 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_fbdev_generic.h>
-static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_framebuffer *fb = fb_helper->fb;
-
- return dev->mode_config.prefer_shadow_fbdev ||
- dev->mode_config.prefer_shadow ||
- fb->funcs->dirty;
-}
-
/* @user: 1=userspace, 0=fbcon */
-static int drm_fbdev_fb_open(struct fb_info *info, int user)
+static int drm_fbdev_generic_fb_open(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -33,7 +24,7 @@ static int drm_fbdev_fb_open(struct fb_info *info, int user)
return 0;
}
-static int drm_fbdev_fb_release(struct fb_info *info, int user)
+static int drm_fbdev_generic_fb_release(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -43,133 +34,51 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
return 0;
}
-static void drm_fbdev_fb_destroy(struct fb_info *info)
+static void drm_fbdev_generic_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- void *shadow = NULL;
+ void *shadow = info->screen_buffer;
if (!fb_helper->dev)
return;
- if (info->fbdefio)
- fb_deferred_io_cleanup(info);
- if (drm_fbdev_use_shadow_fb(fb_helper))
- shadow = info->screen_buffer;
-
+ fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
-
- if (shadow)
- vfree(shadow);
- else if (fb_helper->buffer)
- drm_client_buffer_vunmap(fb_helper->buffer);
-
+ vfree(shadow);
drm_client_framebuffer_delete(fb_helper->buffer);
- drm_client_release(&fb_helper->client);
+ drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
-static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- struct drm_fb_helper *fb_helper = info->par;
-
- if (drm_fbdev_use_shadow_fb(fb_helper))
- return fb_deferred_io_mmap(info, vma);
- else if (fb_helper->dev->driver->gem_prime_mmap)
- return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
- else
- return -ENODEV;
-}
-
-static bool drm_fbdev_use_iomem(struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_client_buffer *buffer = fb_helper->buffer;
-
- return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
-}
-
-static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t ret;
-
- if (drm_fbdev_use_iomem(info))
- ret = drm_fb_helper_cfb_read(info, buf, count, ppos);
- else
- ret = drm_fb_helper_sys_read(info, buf, count, ppos);
-
- return ret;
-}
-
-static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t ret;
-
- if (drm_fbdev_use_iomem(info))
- ret = drm_fb_helper_cfb_write(info, buf, count, ppos);
- else
- ret = drm_fb_helper_sys_write(info, buf, count, ppos);
-
- return ret;
-}
-
-static void drm_fbdev_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- if (drm_fbdev_use_iomem(info))
- drm_fb_helper_cfb_fillrect(info, rect);
- else
- drm_fb_helper_sys_fillrect(info, rect);
-}
-
-static void drm_fbdev_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- if (drm_fbdev_use_iomem(info))
- drm_fb_helper_cfb_copyarea(info, area);
- else
- drm_fb_helper_sys_copyarea(info, area);
-}
-
-static void drm_fbdev_fb_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- if (drm_fbdev_use_iomem(info))
- drm_fb_helper_cfb_imageblit(info, image);
- else
- drm_fb_helper_sys_imageblit(info, image);
-}
-
-static const struct fb_ops drm_fbdev_fb_ops = {
+static const struct fb_ops drm_fbdev_generic_fb_ops = {
.owner = THIS_MODULE,
+ .fb_open = drm_fbdev_generic_fb_open,
+ .fb_release = drm_fbdev_generic_fb_release,
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
DRM_FB_HELPER_DEFAULT_OPS,
- .fb_open = drm_fbdev_fb_open,
- .fb_release = drm_fbdev_fb_release,
- .fb_destroy = drm_fbdev_fb_destroy,
- .fb_mmap = drm_fbdev_fb_mmap,
- .fb_read = drm_fbdev_fb_read,
- .fb_write = drm_fbdev_fb_write,
- .fb_fillrect = drm_fbdev_fb_fillrect,
- .fb_copyarea = drm_fbdev_fb_copyarea,
- .fb_imageblit = drm_fbdev_fb_imageblit,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_mmap = fb_deferred_io_mmap,
+ .fb_destroy = drm_fbdev_generic_fb_destroy,
};
/*
* This function uses the client API to create a framebuffer backed by a dumb buffer.
*/
-static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
+static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer;
- struct drm_framebuffer *fb;
struct fb_info *info;
+ size_t screen_size;
+ void *screen_buffer;
u32 format;
- struct iosys_map map;
int ret;
drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
@@ -184,64 +93,56 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
fb_helper->buffer = buffer;
fb_helper->fb = buffer->fb;
- fb = buffer->fb;
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info))
- return PTR_ERR(info);
+ screen_size = buffer->gem->size;
+ screen_buffer = vzalloc(screen_size);
+ if (!screen_buffer) {
+ ret = -ENOMEM;
+ goto err_drm_client_framebuffer_delete;
+ }
- info->fbops = &drm_fbdev_fb_ops;
- info->screen_size = sizes->surface_height * fb->pitches[0];
- info->fix.smem_len = info->screen_size;
- info->flags = FBINFO_DEFAULT;
+ info = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_vfree;
+ }
drm_fb_helper_fill_info(info, fb_helper, sizes);
- if (drm_fbdev_use_shadow_fb(fb_helper)) {
- info->screen_buffer = vzalloc(info->screen_size);
- if (!info->screen_buffer)
- return -ENOMEM;
- info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
+ info->fbops = &drm_fbdev_generic_fb_ops;
+ info->flags = FBINFO_DEFAULT;
- /* Set a default deferred I/O handler */
- fb_helper->fbdefio.delay = HZ / 20;
- fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+ /* screen */
+ info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
+ info->screen_buffer = screen_buffer;
+ info->fix.smem_start = page_to_phys(vmalloc_to_page(info->screen_buffer));
+ info->fix.smem_len = screen_size;
- info->fbdefio = &fb_helper->fbdefio;
- ret = fb_deferred_io_init(info);
- if (ret)
- return ret;
- } else {
- /* buffer is mapped for HW framebuffer */
- ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
- if (ret)
- return ret;
- if (map.is_iomem) {
- info->screen_base = map.vaddr_iomem;
- } else {
- info->screen_buffer = map.vaddr;
- info->flags |= FBINFO_VIRTFB;
- }
-
- /*
- * Shamelessly leak the physical address to user-space. As
- * page_to_phys() is undefined for I/O memory, warn in this
- * case.
- */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- if (fb_helper->hint_leak_smem_start && info->fix.smem_start == 0 &&
- !drm_WARN_ON_ONCE(dev, map.is_iomem))
- info->fix.smem_start =
- page_to_phys(virt_to_page(info->screen_buffer));
-#endif
- }
+ /* deferred I/O */
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_drm_fb_helper_release_info;
return 0;
+
+err_drm_fb_helper_release_info:
+ drm_fb_helper_release_info(fb_helper);
+err_vfree:
+ vfree(screen_buffer);
+err_drm_client_framebuffer_delete:
+ fb_helper->fb = NULL;
+ fb_helper->buffer = NULL;
+ drm_client_framebuffer_delete(buffer);
+ return ret;
}
-static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
- struct drm_clip_rect *clip,
- struct iosys_map *dst)
+static void drm_fbdev_generic_damage_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip,
+ struct iosys_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
size_t offset = clip->y1 * fb->pitches[0];
@@ -278,8 +179,8 @@ static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
}
}
-static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
- struct drm_clip_rect *clip)
+static int drm_fbdev_generic_damage_blit(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
{
struct drm_client_buffer *buffer = fb_helper->buffer;
struct iosys_map map, dst;
@@ -303,7 +204,7 @@ static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
goto out;
dst = map;
- drm_fbdev_damage_blit_real(fb_helper, clip, &dst);
+ drm_fbdev_generic_damage_blit_real(fb_helper, clip, &dst);
drm_client_buffer_vunmap(buffer);
@@ -313,23 +214,19 @@ out:
return ret;
}
-static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+static int drm_fbdev_generic_helper_fb_dirty(struct drm_fb_helper *helper,
+ struct drm_clip_rect *clip)
{
struct drm_device *dev = helper->dev;
int ret;
- if (!drm_fbdev_use_shadow_fb(helper))
- return 0;
-
/* Call damage handlers only if necessary */
if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
return 0;
- if (helper->buffer) {
- ret = drm_fbdev_damage_blit(helper, clip);
- if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
- return ret;
- }
+ ret = drm_fbdev_generic_damage_blit(helper, clip);
+ if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+ return ret;
if (helper->fb->funcs->dirty) {
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
@@ -340,12 +237,12 @@ static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect
return 0;
}
-static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
- .fb_probe = drm_fbdev_fb_probe,
- .fb_dirty = drm_fbdev_fb_dirty,
+static const struct drm_fb_helper_funcs drm_fbdev_generic_helper_funcs = {
+ .fb_probe = drm_fbdev_generic_helper_fb_probe,
+ .fb_dirty = drm_fbdev_generic_helper_fb_dirty,
};
-static void drm_fbdev_client_unregister(struct drm_client_dev *client)
+static void drm_fbdev_generic_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
@@ -358,14 +255,14 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client)
}
}
-static int drm_fbdev_client_restore(struct drm_client_dev *client)
+static int drm_fbdev_generic_client_restore(struct drm_client_dev *client)
{
drm_fb_helper_lastclose(client->dev);
return 0;
}
-static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+static int drm_fbdev_generic_client_hotplug(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
struct drm_device *dev = client->dev;
@@ -394,11 +291,11 @@ err_drm_err:
return ret;
}
-static const struct drm_client_funcs drm_fbdev_client_funcs = {
+static const struct drm_client_funcs drm_fbdev_generic_client_funcs = {
.owner = THIS_MODULE,
- .unregister = drm_fbdev_client_unregister,
- .restore = drm_fbdev_client_restore,
- .hotplug = drm_fbdev_client_hotplug,
+ .unregister = drm_fbdev_generic_client_unregister,
+ .restore = drm_fbdev_generic_client_restore,
+ .hotplug = drm_fbdev_generic_client_hotplug,
};
/**
@@ -415,20 +312,16 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
* suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
* Simple drivers might use drm_mode_config_helper_suspend().
*
- * Drivers that set the dirty callback on their framebuffer will get a shadow
- * fbdev buffer that is blitted onto the real buffer. This is done in order to
- * make deferred I/O work with all kinds of buffers. A shadow buffer can be
- * requested explicitly by setting struct drm_mode_config.prefer_shadow or
- * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
- * required to use generic fbdev emulation with SHMEM helpers.
+ * In order to provide fixed mmap-able memory ranges, generic fbdev emulation
+ * uses a shadow buffer in system memory. The implementation blits the shadow
+ * fbdev buffer onto the real buffer at regular intervals.
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
*
* The fbdev is destroyed by drm_dev_unregister().
*/
-void drm_fbdev_generic_setup(struct drm_device *dev,
- unsigned int preferred_bpp)
+void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
struct drm_fb_helper *fb_helper;
int ret;
@@ -439,15 +332,15 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
if (!fb_helper)
return;
- drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fb_helper_generic_funcs);
+ drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_generic_helper_funcs);
- ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_generic_client_funcs);
if (ret) {
drm_err(dev, "Failed to register client: %d\n", ret);
goto err_drm_client_init;
}
- ret = drm_fbdev_client_hotplug(&fb_helper->client);
+ ret = drm_fbdev_generic_client_hotplug(&fb_helper->client);
if (ret)
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index a51ff8cee049..c1018c470047 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -156,7 +156,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
if (!file)
return ERR_PTR(-ENOMEM);
- file->pid = get_pid(task_pid(current));
+ file->pid = get_pid(task_tgid(current));
file->minor = minor;
/* for compatibility root is always authenticated */
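
The drm_file change switches per-client pid bookkeeping from the opening
thread to its thread group, so accounting interfaces such as fdinfo name
the process rather than an arbitrary thread. An illustrative sketch of
the distinction, assuming it runs in a worker thread of a multi-threaded
process:

	pid_t tid  = pid_nr(task_pid(current));  /* this thread's id */
	pid_t tgid = pid_nr(task_tgid(current)); /* the process's id */
	/* tid != tgid for every thread except the group leader */
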
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 7a3cb08dc942..ee3e11e7177d 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -336,13 +336,6 @@ out:
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
-int drm_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- u32 handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
/**
* drm_gem_handle_create_tail - internal functions to create a handle
* @file_priv: drm file-private structure to register the handle for
@@ -1466,3 +1459,21 @@ tail:
return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
+
+/**
+ * drm_gem_evict - helper to evict backing pages for a GEM object
+ * @obj: obj in question
+ */
+int drm_gem_evict(struct drm_gem_object *obj)
+{
+ dma_resv_assert_held(obj->resv);
+
+ if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
+ return -EBUSY;
+
+ if (obj->funcs->evict)
+ return obj->funcs->evict(obj);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_evict);
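
drm_gem_evict() gives shrinker-style code a single entry point for
dropping an object's backing store. A hedged sketch of a caller
(example_try_evict() is hypothetical); the object's reservation must be
locked, and unsignaled fences make the eviction fail:

	static int example_try_evict(struct drm_gem_object *obj)
	{
		int ret;

		ret = dma_resv_lock(obj->resv, NULL);
		if (ret)
			return ret;

		/* Returns -EBUSY while readers hold unsignaled fences. */
		ret = drm_gem_evict(obj);

		dma_resv_unlock(obj->resv);
		return ret;
	}
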
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 75185a960fc4..9b0d540ff4a8 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- WARN_ON(shmem->vmap_use_count);
+ drm_WARN_ON(obj->dev, shmem->vmap_use_count);
if (obj->import_attach) {
drm_prime_gem_destroy(obj, shmem->sgt);
@@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
drm_gem_shmem_put_pages(shmem);
}
- WARN_ON(shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, shmem->pages_use_count);
drm_gem_object_release(obj);
mutex_destroy(&shmem->pages_lock);
@@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
- DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+ drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
+ PTR_ERR(pages));
shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
*/
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
+ struct drm_gem_object *obj = &shmem->base;
int ret;
- WARN_ON(shmem->base.import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
ret = mutex_lock_interruptible(&shmem->pages_lock);
if (ret)
@@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- if (WARN_ON_ONCE(!shmem->pages_use_count))
+ if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
return;
if (--shmem->pages_use_count > 0)
@@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
*/
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
- WARN_ON(shmem->base.import_attach);
+ struct drm_gem_object *obj = &shmem->base;
+
+ drm_WARN_ON(obj->dev, obj->import_attach);
return drm_gem_shmem_get_pages(shmem);
}
@@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
*/
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
- WARN_ON(shmem->base.import_attach);
+ struct drm_gem_object *obj = &shmem->base;
+
+ drm_WARN_ON(obj->dev, obj->import_attach);
drm_gem_shmem_put_pages(shmem);
}
@@ -295,24 +301,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- if (shmem->vmap_use_count++ > 0) {
- iosys_map_set_vaddr(map, shmem->vaddr);
- return 0;
- }
-
if (obj->import_attach) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
- if (WARN_ON(map->is_iomem)) {
+ if (drm_WARN_ON(obj->dev, map->is_iomem)) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
- ret = -EIO;
- goto err_put_pages;
+ return -EIO;
}
- shmem->vaddr = map->vaddr;
}
} else {
pgprot_t prot = PAGE_KERNEL;
+ if (shmem->vmap_use_count++ > 0) {
+ iosys_map_set_vaddr(map, shmem->vaddr);
+ return 0;
+ }
+
ret = drm_gem_shmem_get_pages(shmem);
if (ret)
goto err_zero_use;
@@ -328,7 +332,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
}
if (ret) {
- DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
+ drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
goto err_put_pages;
}
@@ -378,15 +382,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
{
struct drm_gem_object *obj = &shmem->base;
- if (WARN_ON_ONCE(!shmem->vmap_use_count))
- return;
-
- if (--shmem->vmap_use_count > 0)
- return;
-
if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
+ if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
+ return;
+
+ if (--shmem->vmap_use_count > 0)
+ return;
+
vunmap(shmem->vaddr);
drm_gem_shmem_put_pages(shmem);
}
@@ -461,7 +465,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
- WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+ drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
@@ -550,7 +554,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
mutex_lock(&shmem->pages_lock);
if (page_offset >= num_pages ||
- WARN_ON_ONCE(!shmem->pages) ||
+ drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
shmem->madv < 0) {
ret = VM_FAULT_SIGBUS;
} else {
@@ -569,7 +573,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- WARN_ON(shmem->base.import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
mutex_lock(&shmem->pages_lock);
@@ -578,7 +582,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!WARN_ON_ONCE(!shmem->pages_use_count))
+ if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
shmem->pages_use_count++;
mutex_unlock(&shmem->pages_lock);
@@ -648,6 +652,9 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent)
{
+ if (shmem->base.import_attach)
+ return;
+
drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
@@ -672,7 +679,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- WARN_ON(shmem->base.import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
@@ -687,7 +694,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
if (shmem->sgt)
return shmem->sgt;
- WARN_ON(obj->import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
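
Besides the drm_WARN_ON() conversions, the vmap path above now keeps
vmap_use_count only for locally allocated pages and defers imported
dma-bufs entirely to dma_buf_vmap()/dma_buf_vunmap(). The caller-visible
API is unchanged; a short sketch (example_fill() is hypothetical):

	#include <drm/drm_gem_shmem_helper.h>
	#include <linux/iosys-map.h>

	static int example_fill(struct drm_gem_shmem_object *shmem, u8 value)
	{
		struct iosys_map map;
		int ret;

		ret = drm_gem_shmem_vmap(shmem, &map);
		if (ret)
			return ret;

		iosys_map_memset(&map, 0, value, shmem->base.size);

		drm_gem_shmem_vunmap(shmem, &map);
		return 0;
	}
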
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index d40b3edb52d0..0bea3df2a16d 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -916,6 +916,17 @@ static int bo_driver_move(struct ttm_buffer_object *bo,
{
struct drm_gem_vram_object *gbo;
+ if (!bo->resource) {
+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_SYSTEM;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
gbo = drm_gem_vram_of_bo(bo);
return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index ed2103ee272c..d7e023bbb0d5 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -178,9 +178,6 @@ void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
-int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- u32 handle);
-
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 40d482a01178..ac9a406250c5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -2339,8 +2339,7 @@ static int drm_mode_parse_cmdline_named_mode(const char *name,
* @mode: preallocated drm_cmdline_mode structure to fill out
*
* This parses @mode_option command line modeline for modes and options to
- * configure the connector. If @mode_option is NULL the default command line
- * modeline in fb_mode_option will be parsed instead.
+ * configure the connector.
*
* This uses the same parameters as the fb modedb.c, except for an extra
* force-enable, force-enable-digital and force-disable bit at the end::
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 7bbcb999bb75..177b600895d3 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -10,6 +10,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -493,3 +494,53 @@ int drm_of_get_data_lanes_count_ep(const struct device_node *port,
return ret;
}
EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count_ep);
+
+#if IS_ENABLED(CONFIG_DRM_MIPI_DSI)
+
+/**
+ * drm_of_get_dsi_bus - find the DSI bus for a given device
+ * @dev: parent device of display (SPI, I2C)
+ *
+ * Gets parent DSI bus for a DSI device controlled through a bus other
+ * than MIPI-DCS (SPI, I2C, etc.) using the Device Tree.
+ *
+ * Returns pointer to mipi_dsi_host if successful, -EINVAL if the
+ * request is unsupported, -EPROBE_DEFER if the DSI host is found but
+ * not available, or -ENODEV otherwise.
+ */
+struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev)
+{
+ struct mipi_dsi_host *dsi_host;
+ struct device_node *endpoint, *dsi_host_node;
+
+ /*
+ * Get first endpoint child from device.
+ */
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (!endpoint)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Follow the first endpoint to get the DSI host node and then
+ * release the endpoint since we no longer need it.
+ */
+ dsi_host_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+ if (!dsi_host_node)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Get the DSI host from the DSI host node. If we get an error
+ * or the return is null assume we're not ready to probe just
+ * yet. Release the DSI host node since we're done with it.
+ */
+ dsi_host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (IS_ERR_OR_NULL(dsi_host))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return dsi_host;
+}
+EXPORT_SYMBOL_GPL(drm_of_get_dsi_bus);
+
+#endif /* CONFIG_DRM_MIPI_DSI */
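
A hedged usage sketch for the new helper: a panel whose control bus is
SPI (or I2C) but whose video path is DSI finds its host at probe time
and registers a DSI device on it. The "example-panel" name, channel 0
and the probe function are assumptions for illustration:

	#include <linux/spi/spi.h>
	#include <drm/drm_mipi_dsi.h>
	#include <drm/drm_of.h>

	static int example_panel_probe(struct spi_device *spi)
	{
		struct mipi_dsi_device_info info = {
			.type = "example-panel",
			.channel = 0,
			.node = NULL,
		};
		struct mipi_dsi_host *host;
		struct mipi_dsi_device *dsi;

		host = drm_of_get_dsi_bus(&spi->dev);
		if (IS_ERR(host))
			return PTR_ERR(host); /* may be -EPROBE_DEFER */

		dsi = mipi_dsi_device_register_full(host, &info);
		if (IS_ERR(dsi))
			return PTR_ERR(dsi);

		return 0;
	}
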
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index f924b8b4ab6b..149cd4ff6a3b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -285,7 +285,7 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
/**
* drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
- * @dev: dev to export the buffer from
+ * @dev: drm_device to import into
* @file_priv: drm file-private structure
* @prime_fd: fd id of the dma-buf which should be imported
* @handle: pointer to storage for the handle of the imported buffer object
@@ -925,7 +925,7 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
obj = dma_buf->priv;
if (obj->dev == dev) {
/*
- * Importing dmabuf exported from out own gem increases
+ * Importing dmabuf exported from our own gem increases
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_get(obj);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 8127be134c39..2fb9bf901a2c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -590,8 +590,9 @@ retry:
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
- schedule_delayed_work(&dev->mode_config.output_poll_work,
- 0);
+ mod_delayed_work(system_wq,
+ &dev->mode_config.output_poll_work,
+ 0);
}
/* Re-enable polling in case the global poll config changed. */
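
The switch to mod_delayed_work() matters when the poll work is already
queued with its regular (long) delay: schedule_delayed_work() leaves a
pending timer untouched, while mod_delayed_work() re-arms it to the new
zero delay so the delayed event is handled promptly. Illustrative:

	schedule_delayed_work(&dwork, 10 * HZ); /* queued, fires in 10 s      */
	schedule_delayed_work(&dwork, 0);       /* no-op, still fires in 10 s */
	mod_delayed_work(system_wq, &dwork, 0); /* re-armed, fires at once    */
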
diff --git a/drivers/gpu/drm/drm_suballoc.c b/drivers/gpu/drm/drm_suballoc.c
new file mode 100644
index 000000000000..38cc7a123819
--- /dev/null
+++ b/drivers/gpu/drm/drm_suballoc.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2011 Red Hat Inc.
+ * Copyright 2023 Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole", and we always try to
+ * allocate after it. The principle is that in a linear GPU ring
+ * progression, what comes after last is the oldest bo we allocated and
+ * thus the first one that should no longer be in use by the GPU.
+ *
+ * If that's not the case, we skip over the bo after last to the
+ * closest done bo, if one exists. If none exists and we are not asked
+ * to block, we report failure to allocate.
+ *
+ * If we are asked to block, we collect the oldest fence of each ring
+ * and wait for any of those fences to complete.
+ */
+
+#include <drm/drm_suballoc.h>
+#include <drm/drm_print.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-fence.h>
+
+static void drm_suballoc_remove_locked(struct drm_suballoc *sa);
+static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager);
+
+/**
+ * drm_suballoc_manager_init() - Initialise the drm_suballoc_manager
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to suballocate
+ * @align: alignment for each suballocated chunk
+ *
+ * Prepares the suballocation manager for suballocations.
+ */
+void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align)
+{
+ unsigned int i;
+
+ BUILD_BUG_ON(!is_power_of_2(DRM_SUBALLOC_MAX_QUEUES));
+
+ if (!align)
+ align = 1;
+
+ /* alignment must be a power of 2 */
+ if (WARN_ON_ONCE(align & (align - 1)))
+ align = roundup_pow_of_two(align);
+
+ init_waitqueue_head(&sa_manager->wq);
+ sa_manager->size = size;
+ sa_manager->align = align;
+ sa_manager->hole = &sa_manager->olist;
+ INIT_LIST_HEAD(&sa_manager->olist);
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ INIT_LIST_HEAD(&sa_manager->flist[i]);
+}
+EXPORT_SYMBOL(drm_suballoc_manager_init);
+
+/**
+ * drm_suballoc_manager_fini() - Destroy the drm_suballoc_manager
+ * @sa_manager: pointer to the sa_manager
+ *
+ * Cleans up the suballocation manager after use. All fences added
+ * with drm_suballoc_free() must be signaled, or we cannot clean up
+ * the entire manager.
+ */
+void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager)
+{
+ struct drm_suballoc *sa, *tmp;
+
+ if (!sa_manager->size)
+ return;
+
+ if (!list_empty(&sa_manager->olist)) {
+ sa_manager->hole = &sa_manager->olist;
+ drm_suballoc_try_free(sa_manager);
+ if (!list_empty(&sa_manager->olist))
+ DRM_ERROR("sa_manager is not empty, clearing anyway\n");
+ }
+ list_for_each_entry_safe(sa, tmp, &sa_manager->olist, olist) {
+ drm_suballoc_remove_locked(sa);
+ }
+
+ sa_manager->size = 0;
+}
+EXPORT_SYMBOL(drm_suballoc_manager_fini);
+
+static void drm_suballoc_remove_locked(struct drm_suballoc *sa)
+{
+ struct drm_suballoc_manager *sa_manager = sa->manager;
+
+ if (sa_manager->hole == &sa->olist)
+ sa_manager->hole = sa->olist.prev;
+
+ list_del_init(&sa->olist);
+ list_del_init(&sa->flist);
+ dma_fence_put(sa->fence);
+ kfree(sa);
+}
+
+static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager)
+{
+ struct drm_suballoc *sa, *tmp;
+
+ if (sa_manager->hole->next == &sa_manager->olist)
+ return;
+
+ sa = list_entry(sa_manager->hole->next, struct drm_suballoc, olist);
+ list_for_each_entry_safe_from(sa, tmp, &sa_manager->olist, olist) {
+ if (!sa->fence || !dma_fence_is_signaled(sa->fence))
+ return;
+
+ drm_suballoc_remove_locked(sa);
+ }
+}
+
+static size_t drm_suballoc_hole_soffset(struct drm_suballoc_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole != &sa_manager->olist)
+ return list_entry(hole, struct drm_suballoc, olist)->eoffset;
+
+ return 0;
+}
+
+static size_t drm_suballoc_hole_eoffset(struct drm_suballoc_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole->next != &sa_manager->olist)
+ return list_entry(hole->next, struct drm_suballoc, olist)->soffset;
+ return sa_manager->size;
+}
+
+static bool drm_suballoc_try_alloc(struct drm_suballoc_manager *sa_manager,
+ struct drm_suballoc *sa,
+ size_t size, size_t align)
+{
+ size_t soffset, eoffset, wasted;
+
+ soffset = drm_suballoc_hole_soffset(sa_manager);
+ eoffset = drm_suballoc_hole_eoffset(sa_manager);
+ wasted = round_up(soffset, align) - soffset;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ soffset += wasted;
+
+ sa->manager = sa_manager;
+ sa->soffset = soffset;
+ sa->eoffset = soffset + size;
+ list_add(&sa->olist, sa_manager->hole);
+ INIT_LIST_HEAD(&sa->flist);
+ sa_manager->hole = &sa->olist;
+ return true;
+ }
+ return false;
+}
+
+static bool __drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align)
+{
+ size_t soffset, eoffset, wasted;
+ unsigned int i;
+
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ if (!list_empty(&sa_manager->flist[i]))
+ return true;
+
+ soffset = drm_suballoc_hole_soffset(sa_manager);
+ eoffset = drm_suballoc_hole_eoffset(sa_manager);
+ wasted = round_up(soffset, align) - soffset;
+
+ return ((eoffset - soffset) >= (size + wasted));
+}
+
+/**
+ * drm_suballoc_event() - Check if we can stop waiting
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Return: true if there is either a fence we can wait for or
+ * enough free memory to satisfy the allocation directly;
+ * false otherwise.
+ */
+static bool drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align)
+{
+ bool ret;
+
+ spin_lock(&sa_manager->wq.lock);
+ ret = __drm_suballoc_event(sa_manager, size, align);
+ spin_unlock(&sa_manager->wq.lock);
+ return ret;
+}
+
+static bool drm_suballoc_next_hole(struct drm_suballoc_manager *sa_manager,
+ struct dma_fence **fences,
+ unsigned int *tries)
+{
+ struct drm_suballoc *best_bo = NULL;
+ unsigned int i, best_idx;
+ size_t soffset, best, tmp;
+
+ /* if hole points to the end of the buffer */
+ if (sa_manager->hole->next == &sa_manager->olist) {
+ /* try again with its beginning */
+ sa_manager->hole = &sa_manager->olist;
+ return true;
+ }
+
+ soffset = drm_suballoc_hole_soffset(sa_manager);
+ /* to handle wrap around we add sa_manager->size */
+ best = sa_manager->size * 2;
+ /* go over all fence lists and try to find the sa
+ * closest to the current last
+ */
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i) {
+ struct drm_suballoc *sa;
+
+ fences[i] = NULL;
+
+ if (list_empty(&sa_manager->flist[i]))
+ continue;
+
+ sa = list_first_entry(&sa_manager->flist[i],
+ struct drm_suballoc, flist);
+
+ if (!dma_fence_is_signaled(sa->fence)) {
+ fences[i] = sa->fence;
+ continue;
+ }
+
+ /* limit the number of tries each freelist gets */
+ if (tries[i] > 2)
+ continue;
+
+ tmp = sa->soffset;
+ if (tmp < soffset) {
+ /* wrap around, pretend it's after */
+ tmp += sa_manager->size;
+ }
+ tmp -= soffset;
+ if (tmp < best) {
+ /* this sa bo is the closest one */
+ best = tmp;
+ best_idx = i;
+ best_bo = sa;
+ }
+ }
+
+ if (best_bo) {
+ ++tries[best_idx];
+ sa_manager->hole = best_bo->olist.prev;
+
+ /*
+ * We know that this one is signaled,
+ * so it's safe to remove it.
+ */
+ drm_suballoc_remove_locked(best_bo);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * drm_suballoc_new() - Make a suballocation.
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to suballocate.
+ * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL but
+ * the argument is provided for suballocations from reclaim context or
+ * where the caller wants to avoid pipelining rather than wait for
+ * reclaim.
+ * @intr: Whether to perform waits interruptibly. This should typically
+ * always be true, unless the caller needs to propagate a
+ * non-interruptible context from above layers.
+ * @align: Alignment. Must not exceed the default manager alignment.
+ * If @align is zero, then the manager alignment is used.
+ *
+ * Try to make a suballocation of size @size, which will be rounded
+ * up to the alignment specified in drm_suballoc_manager_init().
+ *
+ * Return: a new suballocated bo, or an ERR_PTR.
+ */
+struct drm_suballoc *
+drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size,
+ gfp_t gfp, bool intr, size_t align)
+{
+ struct dma_fence *fences[DRM_SUBALLOC_MAX_QUEUES];
+ unsigned int tries[DRM_SUBALLOC_MAX_QUEUES];
+ unsigned int count;
+ int i, r;
+ struct drm_suballoc *sa;
+
+ if (WARN_ON_ONCE(align > sa_manager->align))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON_ONCE(size > sa_manager->size || !size))
+ return ERR_PTR(-EINVAL);
+
+ if (!align)
+ align = sa_manager->align;
+
+ sa = kmalloc(sizeof(*sa), gfp);
+ if (!sa)
+ return ERR_PTR(-ENOMEM);
+ sa->manager = sa_manager;
+ sa->fence = NULL;
+ INIT_LIST_HEAD(&sa->olist);
+ INIT_LIST_HEAD(&sa->flist);
+
+ spin_lock(&sa_manager->wq.lock);
+ do {
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ tries[i] = 0;
+
+ do {
+ drm_suballoc_try_free(sa_manager);
+
+ if (drm_suballoc_try_alloc(sa_manager, sa,
+ size, align)) {
+ spin_unlock(&sa_manager->wq.lock);
+ return sa;
+ }
+
+ /* see if we can skip over some allocations */
+ } while (drm_suballoc_next_hole(sa_manager, fences, tries));
+
+ for (i = 0, count = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ if (fences[i])
+ fences[count++] = dma_fence_get(fences[i]);
+
+ if (count) {
+ long t;
+
+ spin_unlock(&sa_manager->wq.lock);
+ t = dma_fence_wait_any_timeout(fences, count, intr,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
+ for (i = 0; i < count; ++i)
+ dma_fence_put(fences[i]);
+
+ r = (t > 0) ? 0 : t;
+ spin_lock(&sa_manager->wq.lock);
+ } else if (intr) {
+ /* if we have nothing to wait for, block */
+ r = wait_event_interruptible_locked
+ (sa_manager->wq,
+ __drm_suballoc_event(sa_manager, size, align));
+ } else {
+ spin_unlock(&sa_manager->wq.lock);
+ wait_event(sa_manager->wq,
+ drm_suballoc_event(sa_manager, size, align));
+ r = 0;
+ spin_lock(&sa_manager->wq.lock);
+ }
+ } while (!r);
+
+ spin_unlock(&sa_manager->wq.lock);
+ kfree(sa);
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL(drm_suballoc_new);
+
+/**
+ * drm_suballoc_free - Free a suballocation
+ * @suballoc: pointer to the suballocation
+ * @fence: fence that signals when suballocation is idle
+ *
+ * Free the suballocation. The suballocation can be re-used after @fence signals.
+ */
+void drm_suballoc_free(struct drm_suballoc *suballoc,
+ struct dma_fence *fence)
+{
+ struct drm_suballoc_manager *sa_manager;
+
+ if (!suballoc)
+ return;
+
+ sa_manager = suballoc->manager;
+
+ spin_lock(&sa_manager->wq.lock);
+ if (fence && !dma_fence_is_signaled(fence)) {
+ u32 idx;
+
+ suballoc->fence = dma_fence_get(fence);
+ idx = fence->context & (DRM_SUBALLOC_MAX_QUEUES - 1);
+ list_add_tail(&suballoc->flist, &sa_manager->flist[idx]);
+ } else {
+ drm_suballoc_remove_locked(suballoc);
+ }
+ wake_up_all_locked(&sa_manager->wq);
+ spin_unlock(&sa_manager->wq.lock);
+}
+EXPORT_SYMBOL(drm_suballoc_free);
+
+#ifdef CONFIG_DEBUG_FS
+void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
+ struct drm_printer *p,
+ unsigned long long suballoc_base)
+{
+ struct drm_suballoc *i;
+
+ spin_lock(&sa_manager->wq.lock);
+ list_for_each_entry(i, &sa_manager->olist, olist) {
+ unsigned long long soffset = i->soffset;
+ unsigned long long eoffset = i->eoffset;
+
+ if (&i->olist == sa_manager->hole)
+ drm_puts(p, ">");
+ else
+ drm_puts(p, " ");
+
+ drm_printf(p, "[0x%010llx 0x%010llx] size %8lld",
+ suballoc_base + soffset, suballoc_base + eoffset,
+ eoffset - soffset);
+
+ if (i->fence)
+ drm_printf(p, " protected by 0x%016llx on context %llu",
+ (unsigned long long)i->fence->seqno,
+ (unsigned long long)i->fence->context);
+
+ drm_puts(p, "\n");
+ }
+ spin_unlock(&sa_manager->wq.lock);
+}
+EXPORT_SYMBOL(drm_suballoc_dump_debug_info);
+#endif
+MODULE_AUTHOR("Multiple");
+MODULE_DESCRIPTION("Range suballocator helper");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 3d2f025d4fd4..0cb92d651ff1 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,7 +2,7 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC Exynos Series"
depends on OF && DRM && COMMON_CLK
- depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST
+ depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on MMU
select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP
select DRM_KMS_HELPER
@@ -59,6 +59,7 @@ config DRM_EXYNOS_DSI
depends on DRM_EXYNOS_FIMD || DRM_EXYNOS5433_DECON || DRM_EXYNOS7_DECON
select DRM_MIPI_DSI
select DRM_PANEL
+ select DRM_SAMSUNG_DSIM
default n
help
This enables support for Exynos MIPI-DSI device.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 06d6513ddaae..fc81f728e6ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1,1521 +1,56 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung SoC MIPI DSI Master driver.
+ * Samsung MIPI DSIM glue for Exynos SoCs.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
*
* Contacts: Tomasz Figa <t.figa@samsung.com>
-*/
+ */
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/component.h>
-#include <linux/gpio/consumer.h>
-#include <linux/irq.h>
#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/phy/phy.h>
-#include <linux/regulator/consumer.h>
-
-#include <asm/unaligned.h>
-#include <video/mipi_display.h>
-#include <video/videomode.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
+#include <drm/bridge/samsung-dsim.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
-/* returns true iff both arguments logically differs */
-#define NEQV(a, b) (!(a) ^ !(b))
-
-/* DSIM_STATUS */
-#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
-#define DSIM_STOP_STATE_CLK (1 << 8)
-#define DSIM_TX_READY_HS_CLK (1 << 10)
-#define DSIM_PLL_STABLE (1 << 31)
-
-/* DSIM_SWRST */
-#define DSIM_FUNCRST (1 << 16)
-#define DSIM_SWRST (1 << 0)
-
-/* DSIM_TIMEOUT */
-#define DSIM_LPDR_TIMEOUT(x) ((x) << 0)
-#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
-
-/* DSIM_CLKCTRL */
-#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
-#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
-#define DSIM_LANE_ESC_CLK_EN_CLK (1 << 19)
-#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
-#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
-#define DSIM_BYTE_CLKEN (1 << 24)
-#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
-#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
-#define DSIM_PLL_BYPASS (1 << 27)
-#define DSIM_ESC_CLKEN (1 << 28)
-#define DSIM_TX_REQUEST_HSCLK (1 << 31)
-
-/* DSIM_CONFIG */
-#define DSIM_LANE_EN_CLK (1 << 0)
-#define DSIM_LANE_EN(x) (((x) & 0xf) << 1)
-#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5)
-#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8)
-#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12)
-#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12)
-#define DSIM_SUB_VC (((x) & 0x3) << 16)
-#define DSIM_MAIN_VC (((x) & 0x3) << 18)
-#define DSIM_HSA_DISABLE_MODE (1 << 20)
-#define DSIM_HBP_DISABLE_MODE (1 << 21)
-#define DSIM_HFP_DISABLE_MODE (1 << 22)
-/*
- * The i.MX 8M Mini Applications Processor Reference Manual,
- * Rev. 3, 11/2020 Page 4091
- * The i.MX 8M Nano Applications Processor Reference Manual,
- * Rev. 2, 07/2022 Page 3058
- * The i.MX 8M Plus Applications Processor Reference Manual,
- * Rev. 1, 06/2021 Page 5436
- * named this bit as 'HseDisableMode' but the bit definition
- * is quite opposite like
- * 0 = Disables transfer
- * 1 = Enables transfer
- * which clearly states that HSE is not a disable bit.
- *
- * This bit is named as per the manual even though it is not
- * a disable bit however the driver logic for handling HSE
- * is based on the MIPI_DSI_MODE_VIDEO_HSE flag itself.
- */
-#define DSIM_HSE_DISABLE_MODE (1 << 23)
-#define DSIM_AUTO_MODE (1 << 24)
-#define DSIM_VIDEO_MODE (1 << 25)
-#define DSIM_BURST_MODE (1 << 26)
-#define DSIM_SYNC_INFORM (1 << 27)
-#define DSIM_EOT_DISABLE (1 << 28)
-#define DSIM_MFLUSH_VS (1 << 29)
-/* This flag is valid only for exynos3250/3472/5260/5430 */
-#define DSIM_CLKLANE_STOP (1 << 30)
-
-/* DSIM_ESCMODE */
-#define DSIM_TX_TRIGGER_RST (1 << 4)
-#define DSIM_TX_LPDT_LP (1 << 6)
-#define DSIM_CMD_LPDT_LP (1 << 7)
-#define DSIM_FORCE_BTA (1 << 16)
-#define DSIM_FORCE_STOP_STATE (1 << 20)
-#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21)
-#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21)
-
-/* DSIM_MDRESOL */
-#define DSIM_MAIN_STAND_BY (1 << 31)
-#define DSIM_MAIN_VRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 16)
-#define DSIM_MAIN_HRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 0)
-
-/* DSIM_MVPORCH */
-#define DSIM_CMD_ALLOW(x) ((x) << 28)
-#define DSIM_STABLE_VFP(x) ((x) << 16)
-#define DSIM_MAIN_VBP(x) ((x) << 0)
-#define DSIM_CMD_ALLOW_MASK (0xf << 28)
-#define DSIM_STABLE_VFP_MASK (0x7ff << 16)
-#define DSIM_MAIN_VBP_MASK (0x7ff << 0)
-
-/* DSIM_MHPORCH */
-#define DSIM_MAIN_HFP(x) ((x) << 16)
-#define DSIM_MAIN_HBP(x) ((x) << 0)
-#define DSIM_MAIN_HFP_MASK ((0xffff) << 16)
-#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
-
-/* DSIM_MSYNC */
-#define DSIM_MAIN_VSA(x) ((x) << 22)
-#define DSIM_MAIN_HSA(x) ((x) << 0)
-#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
-#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
-
-/* DSIM_SDRESOL */
-#define DSIM_SUB_STANDY(x) ((x) << 31)
-#define DSIM_SUB_VRESOL(x) ((x) << 16)
-#define DSIM_SUB_HRESOL(x) ((x) << 0)
-#define DSIM_SUB_STANDY_MASK ((0x1) << 31)
-#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16)
-#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0)
-
-/* DSIM_INTSRC */
-#define DSIM_INT_PLL_STABLE (1 << 31)
-#define DSIM_INT_SW_RST_RELEASE (1 << 30)
-#define DSIM_INT_SFR_FIFO_EMPTY (1 << 29)
-#define DSIM_INT_SFR_HDR_FIFO_EMPTY (1 << 28)
-#define DSIM_INT_BTA (1 << 25)
-#define DSIM_INT_FRAME_DONE (1 << 24)
-#define DSIM_INT_RX_TIMEOUT (1 << 21)
-#define DSIM_INT_BTA_TIMEOUT (1 << 20)
-#define DSIM_INT_RX_DONE (1 << 18)
-#define DSIM_INT_RX_TE (1 << 17)
-#define DSIM_INT_RX_ACK (1 << 16)
-#define DSIM_INT_RX_ECC_ERR (1 << 15)
-#define DSIM_INT_RX_CRC_ERR (1 << 14)
-
-/* DSIM_FIFOCTRL */
-#define DSIM_RX_DATA_FULL (1 << 25)
-#define DSIM_RX_DATA_EMPTY (1 << 24)
-#define DSIM_SFR_HEADER_FULL (1 << 23)
-#define DSIM_SFR_HEADER_EMPTY (1 << 22)
-#define DSIM_SFR_PAYLOAD_FULL (1 << 21)
-#define DSIM_SFR_PAYLOAD_EMPTY (1 << 20)
-#define DSIM_I80_HEADER_FULL (1 << 19)
-#define DSIM_I80_HEADER_EMPTY (1 << 18)
-#define DSIM_I80_PAYLOAD_FULL (1 << 17)
-#define DSIM_I80_PAYLOAD_EMPTY (1 << 16)
-#define DSIM_SD_HEADER_FULL (1 << 15)
-#define DSIM_SD_HEADER_EMPTY (1 << 14)
-#define DSIM_SD_PAYLOAD_FULL (1 << 13)
-#define DSIM_SD_PAYLOAD_EMPTY (1 << 12)
-#define DSIM_MD_HEADER_FULL (1 << 11)
-#define DSIM_MD_HEADER_EMPTY (1 << 10)
-#define DSIM_MD_PAYLOAD_FULL (1 << 9)
-#define DSIM_MD_PAYLOAD_EMPTY (1 << 8)
-#define DSIM_RX_FIFO (1 << 4)
-#define DSIM_SFR_FIFO (1 << 3)
-#define DSIM_I80_FIFO (1 << 2)
-#define DSIM_SD_FIFO (1 << 1)
-#define DSIM_MD_FIFO (1 << 0)
-
-/* DSIM_PHYACCHR */
-#define DSIM_AFC_EN (1 << 14)
-#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5)
-
-/* DSIM_PLLCTRL */
-#define DSIM_FREQ_BAND(x) ((x) << 24)
-#define DSIM_PLL_EN (1 << 23)
-#define DSIM_PLL_P(x) ((x) << 13)
-#define DSIM_PLL_M(x) ((x) << 4)
-#define DSIM_PLL_S(x) ((x) << 1)
-
-/* DSIM_PHYCTRL */
-#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
-#define DSIM_PHYCTRL_B_DPHYCTL_VREG_LP (1 << 30)
-#define DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP (1 << 14)
-
-/* DSIM_PHYTIMING */
-#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
-#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0)
-
-/* DSIM_PHYTIMING1 */
-#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24)
-#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16)
-#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8)
-#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0)
-
-/* DSIM_PHYTIMING2 */
-#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16)
-#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8)
-#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0)
-
-#define DSI_MAX_BUS_WIDTH 4
-#define DSI_NUM_VIRTUAL_CHANNELS 4
-#define DSI_TX_FIFO_SIZE 2048
-#define DSI_RX_FIFO_SIZE 256
-#define DSI_XFER_TIMEOUT_MS 100
-#define DSI_RX_FIFO_EMPTY 0x30800002
-
-#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-
-static const char *const clk_names[5] = { "bus_clk", "sclk_mipi",
- "phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
- "sclk_rgb_vclk_to_dsim0" };
-
-enum exynos_dsi_transfer_type {
- EXYNOS_DSI_TX,
- EXYNOS_DSI_RX,
-};
-
-struct exynos_dsi_transfer {
- struct list_head list;
- struct completion completed;
- int result;
- struct mipi_dsi_packet packet;
- u16 flags;
- u16 tx_done;
-
- u8 *rx_payload;
- u16 rx_len;
- u16 rx_done;
-};
-
-#define DSIM_STATE_ENABLED BIT(0)
-#define DSIM_STATE_INITIALIZED BIT(1)
-#define DSIM_STATE_CMD_LPM BIT(2)
-#define DSIM_STATE_VIDOUT_AVAILABLE BIT(3)
-
-struct exynos_dsi_driver_data {
- const unsigned int *reg_ofs;
- unsigned int plltmr_reg;
- unsigned int has_freqband:1;
- unsigned int has_clklane_stop:1;
- unsigned int num_clks;
- unsigned int max_freq;
- unsigned int wait_for_reset;
- unsigned int num_bits_resol;
- const unsigned int *reg_values;
-};
-
struct exynos_dsi {
struct drm_encoder encoder;
- struct mipi_dsi_host dsi_host;
- struct drm_bridge bridge;
- struct drm_bridge *out_bridge;
- struct device *dev;
- struct drm_display_mode mode;
-
- void __iomem *reg_base;
- struct phy *phy;
- struct clk **clks;
- struct regulator_bulk_data supplies[2];
- int irq;
- struct gpio_desc *te_gpio;
-
- u32 pll_clk_rate;
- u32 burst_clk_rate;
- u32 esc_clk_rate;
- u32 lanes;
- u32 mode_flags;
- u32 format;
-
- int state;
- struct drm_property *brightness;
- struct completion completed;
-
- spinlock_t transfer_lock; /* protects transfer_list */
- struct list_head transfer_list;
-
- const struct exynos_dsi_driver_data *driver_data;
-};
-
-#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
-
-static inline struct exynos_dsi *bridge_to_dsi(struct drm_bridge *b)
-{
- return container_of(b, struct exynos_dsi, bridge);
-}
-
-enum reg_idx {
- DSIM_STATUS_REG, /* Status register */
- DSIM_SWRST_REG, /* Software reset register */
- DSIM_CLKCTRL_REG, /* Clock control register */
- DSIM_TIMEOUT_REG, /* Time out register */
- DSIM_CONFIG_REG, /* Configuration register */
- DSIM_ESCMODE_REG, /* Escape mode register */
- DSIM_MDRESOL_REG,
- DSIM_MVPORCH_REG, /* Main display Vporch register */
- DSIM_MHPORCH_REG, /* Main display Hporch register */
- DSIM_MSYNC_REG, /* Main display sync area register */
- DSIM_INTSRC_REG, /* Interrupt source register */
- DSIM_INTMSK_REG, /* Interrupt mask register */
- DSIM_PKTHDR_REG, /* Packet Header FIFO register */
- DSIM_PAYLOAD_REG, /* Payload FIFO register */
- DSIM_RXFIFO_REG, /* Read FIFO register */
- DSIM_FIFOCTRL_REG, /* FIFO status and control register */
- DSIM_PLLCTRL_REG, /* PLL control register */
- DSIM_PHYCTRL_REG,
- DSIM_PHYTIMING_REG,
- DSIM_PHYTIMING1_REG,
- DSIM_PHYTIMING2_REG,
- NUM_REGS
-};
-
-static inline void exynos_dsi_write(struct exynos_dsi *dsi, enum reg_idx idx,
- u32 val)
-{
-
- writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
-}
-
-static inline u32 exynos_dsi_read(struct exynos_dsi *dsi, enum reg_idx idx)
-{
- return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
-}
-
-static const unsigned int exynos_reg_ofs[] = {
- [DSIM_STATUS_REG] = 0x00,
- [DSIM_SWRST_REG] = 0x04,
- [DSIM_CLKCTRL_REG] = 0x08,
- [DSIM_TIMEOUT_REG] = 0x0c,
- [DSIM_CONFIG_REG] = 0x10,
- [DSIM_ESCMODE_REG] = 0x14,
- [DSIM_MDRESOL_REG] = 0x18,
- [DSIM_MVPORCH_REG] = 0x1c,
- [DSIM_MHPORCH_REG] = 0x20,
- [DSIM_MSYNC_REG] = 0x24,
- [DSIM_INTSRC_REG] = 0x2c,
- [DSIM_INTMSK_REG] = 0x30,
- [DSIM_PKTHDR_REG] = 0x34,
- [DSIM_PAYLOAD_REG] = 0x38,
- [DSIM_RXFIFO_REG] = 0x3c,
- [DSIM_FIFOCTRL_REG] = 0x44,
- [DSIM_PLLCTRL_REG] = 0x4c,
- [DSIM_PHYCTRL_REG] = 0x5c,
- [DSIM_PHYTIMING_REG] = 0x64,
- [DSIM_PHYTIMING1_REG] = 0x68,
- [DSIM_PHYTIMING2_REG] = 0x6c,
-};
-
-static const unsigned int exynos5433_reg_ofs[] = {
- [DSIM_STATUS_REG] = 0x04,
- [DSIM_SWRST_REG] = 0x0C,
- [DSIM_CLKCTRL_REG] = 0x10,
- [DSIM_TIMEOUT_REG] = 0x14,
- [DSIM_CONFIG_REG] = 0x18,
- [DSIM_ESCMODE_REG] = 0x1C,
- [DSIM_MDRESOL_REG] = 0x20,
- [DSIM_MVPORCH_REG] = 0x24,
- [DSIM_MHPORCH_REG] = 0x28,
- [DSIM_MSYNC_REG] = 0x2C,
- [DSIM_INTSRC_REG] = 0x34,
- [DSIM_INTMSK_REG] = 0x38,
- [DSIM_PKTHDR_REG] = 0x3C,
- [DSIM_PAYLOAD_REG] = 0x40,
- [DSIM_RXFIFO_REG] = 0x44,
- [DSIM_FIFOCTRL_REG] = 0x4C,
- [DSIM_PLLCTRL_REG] = 0x94,
- [DSIM_PHYCTRL_REG] = 0xA4,
- [DSIM_PHYTIMING_REG] = 0xB4,
- [DSIM_PHYTIMING1_REG] = 0xB8,
- [DSIM_PHYTIMING2_REG] = 0xBC,
-};
-
-enum reg_value_idx {
- RESET_TYPE,
- PLL_TIMER,
- STOP_STATE_CNT,
- PHYCTRL_ULPS_EXIT,
- PHYCTRL_VREG_LP,
- PHYCTRL_SLEW_UP,
- PHYTIMING_LPX,
- PHYTIMING_HS_EXIT,
- PHYTIMING_CLK_PREPARE,
- PHYTIMING_CLK_ZERO,
- PHYTIMING_CLK_POST,
- PHYTIMING_CLK_TRAIL,
- PHYTIMING_HS_PREPARE,
- PHYTIMING_HS_ZERO,
- PHYTIMING_HS_TRAIL
-};
-
-static const unsigned int reg_values[] = {
- [RESET_TYPE] = DSIM_SWRST,
- [PLL_TIMER] = 500,
- [STOP_STATE_CNT] = 0xf,
- [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x0af),
- [PHYCTRL_VREG_LP] = 0,
- [PHYCTRL_SLEW_UP] = 0,
- [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
- [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b),
- [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07),
- [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x27),
- [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
- [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08),
- [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
- [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d),
- [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
-};
-
-static const unsigned int exynos5422_reg_values[] = {
- [RESET_TYPE] = DSIM_SWRST,
- [PLL_TIMER] = 500,
- [STOP_STATE_CNT] = 0xf,
- [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
- [PHYCTRL_VREG_LP] = 0,
- [PHYCTRL_SLEW_UP] = 0,
- [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08),
- [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d),
- [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
- [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30),
- [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
- [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a),
- [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c),
- [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11),
- [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d),
-};
-
-static const unsigned int exynos5433_reg_values[] = {
- [RESET_TYPE] = DSIM_FUNCRST,
- [PLL_TIMER] = 22200,
- [STOP_STATE_CNT] = 0xa,
- [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x190),
- [PHYCTRL_VREG_LP] = DSIM_PHYCTRL_B_DPHYCTL_VREG_LP,
- [PHYCTRL_SLEW_UP] = DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP,
- [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
- [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
- [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
- [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2d),
- [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
- [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
- [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0b),
- [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x10),
- [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
-};
-
-static const struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
- .reg_ofs = exynos_reg_ofs,
- .plltmr_reg = 0x50,
- .has_freqband = 1,
- .has_clklane_stop = 1,
- .num_clks = 2,
- .max_freq = 1000,
- .wait_for_reset = 1,
- .num_bits_resol = 11,
- .reg_values = reg_values,
};
-static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
- .reg_ofs = exynos_reg_ofs,
- .plltmr_reg = 0x50,
- .has_freqband = 1,
- .has_clklane_stop = 1,
- .num_clks = 2,
- .max_freq = 1000,
- .wait_for_reset = 1,
- .num_bits_resol = 11,
- .reg_values = reg_values,
-};
-
-static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
- .reg_ofs = exynos_reg_ofs,
- .plltmr_reg = 0x58,
- .num_clks = 2,
- .max_freq = 1000,
- .wait_for_reset = 1,
- .num_bits_resol = 11,
- .reg_values = reg_values,
-};
-
-static const struct exynos_dsi_driver_data exynos5433_dsi_driver_data = {
- .reg_ofs = exynos5433_reg_ofs,
- .plltmr_reg = 0xa0,
- .has_clklane_stop = 1,
- .num_clks = 5,
- .max_freq = 1500,
- .wait_for_reset = 0,
- .num_bits_resol = 12,
- .reg_values = exynos5433_reg_values,
-};
-
-static const struct exynos_dsi_driver_data exynos5422_dsi_driver_data = {
- .reg_ofs = exynos5433_reg_ofs,
- .plltmr_reg = 0xa0,
- .has_clklane_stop = 1,
- .num_clks = 2,
- .max_freq = 1500,
- .wait_for_reset = 1,
- .num_bits_resol = 12,
- .reg_values = exynos5422_reg_values,
-};
-
-static const struct of_device_id exynos_dsi_of_match[] = {
- { .compatible = "samsung,exynos3250-mipi-dsi",
- .data = &exynos3_dsi_driver_data },
- { .compatible = "samsung,exynos4210-mipi-dsi",
- .data = &exynos4_dsi_driver_data },
- { .compatible = "samsung,exynos5410-mipi-dsi",
- .data = &exynos5_dsi_driver_data },
- { .compatible = "samsung,exynos5422-mipi-dsi",
- .data = &exynos5422_dsi_driver_data },
- { .compatible = "samsung,exynos5433-mipi-dsi",
- .data = &exynos5433_dsi_driver_data },
- { }
-};
-
-static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
-{
- if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
- return;
-
- dev_err(dsi->dev, "timeout waiting for reset\n");
-}
-
-static void exynos_dsi_reset(struct exynos_dsi *dsi)
-{
- u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE];
-
- reinit_completion(&dsi->completed);
- exynos_dsi_write(dsi, DSIM_SWRST_REG, reset_val);
-}
-
-#ifndef MHZ
-#define MHZ (1000*1000)
-#endif
-
-static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
- unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- unsigned long best_freq = 0;
- u32 min_delta = 0xffffffff;
- u8 p_min, p_max;
- u8 _p, best_p;
- u16 _m, best_m;
- u8 _s, best_s;
-
- p_min = DIV_ROUND_UP(fin, (12 * MHZ));
- p_max = fin / (6 * MHZ);
-
- for (_p = p_min; _p <= p_max; ++_p) {
- for (_s = 0; _s <= 5; ++_s) {
- u64 tmp;
- u32 delta;
-
- tmp = (u64)fout * (_p << _s);
- do_div(tmp, fin);
- _m = tmp;
- if (_m < 41 || _m > 125)
- continue;
-
- tmp = (u64)_m * fin;
- do_div(tmp, _p);
- if (tmp < 500 * MHZ ||
- tmp > driver_data->max_freq * MHZ)
- continue;
-
- tmp = (u64)_m * fin;
- do_div(tmp, _p << _s);
-
- delta = abs(fout - tmp);
- if (delta < min_delta) {
- best_p = _p;
- best_m = _m;
- best_s = _s;
- min_delta = delta;
- best_freq = tmp;
- }
- }
- }
-
- if (best_freq) {
- *p = best_p;
- *m = best_m;
- *s = best_s;
- }
-
- return best_freq;
-}
-
-static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
- unsigned long freq)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- unsigned long fin, fout;
- int timeout;
- u8 p, s;
- u16 m;
- u32 reg;
-
- fin = dsi->pll_clk_rate;
- fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s);
- if (!fout) {
- dev_err(dsi->dev,
- "failed to find PLL PMS for requested frequency\n");
- return 0;
- }
- dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
-
- writel(driver_data->reg_values[PLL_TIMER],
- dsi->reg_base + driver_data->plltmr_reg);
-
- reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
-
- if (driver_data->has_freqband) {
- static const unsigned long freq_bands[] = {
- 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
- 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
- 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
- 770 * MHZ, 870 * MHZ, 950 * MHZ,
- };
- int band;
-
- for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
- if (fout < freq_bands[band])
- break;
-
- dev_dbg(dsi->dev, "band %d\n", band);
-
- reg |= DSIM_FREQ_BAND(band);
- }
-
- exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg);
-
- timeout = 1000;
- do {
- if (timeout-- == 0) {
- dev_err(dsi->dev, "PLL failed to stabilize\n");
- return 0;
- }
- reg = exynos_dsi_read(dsi, DSIM_STATUS_REG);
- } while ((reg & DSIM_PLL_STABLE) == 0);
-
- return fout;
-}
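
On has_freqband hardware, the loop above selects the first table entry greater than fout; a hand-checked example (values not from the source):

/*
 * fout = 480 MHz falls below the 510 MHz entry at index 8 of
 * freq_bands[], so band = 8 and the PLL control word carries
 * DSIM_FREQ_BAND(8).
 */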
-
-static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
-{
- unsigned long hs_clk, byte_clk, esc_clk;
- unsigned long esc_div;
- u32 reg;
-
- hs_clk = exynos_dsi_set_pll(dsi, dsi->burst_clk_rate);
- if (!hs_clk) {
- dev_err(dsi->dev, "failed to configure DSI PLL\n");
- return -EFAULT;
- }
-
- byte_clk = hs_clk / 8;
- esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
- esc_clk = byte_clk / esc_div;
-
- if (esc_clk > 20 * MHZ) {
- ++esc_div;
- esc_clk = byte_clk / esc_div;
- }
-
- dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
- hs_clk, byte_clk, esc_clk);
-
- reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
- | DSIM_BYTE_CLK_SRC_MASK);
- reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
- | DSIM_ESC_PRESCALER(esc_div)
- | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
- | DSIM_BYTE_CLK_SRC(0)
- | DSIM_TX_REQUEST_HSCLK;
- exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg);
-
- return 0;
-}
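
The divider math above can be hand-checked: the byte clock is always hs_clk / 8, and the escape divider is rounded up so the escape clock never exceeds the requested rate, with a final bump to respect the 20 MHz D-PHY ceiling. A sketch with example values (not from any DT):

/*
 * hs_clk = 480 MHz  ->  byte_clk = 60 MHz.
 * Requested esc rate 30 MHz: esc_div = DIV_ROUND_UP(60, 30) = 2, so
 * esc_clk = 30 MHz; the 20 MHz cap then bumps esc_div to 3, giving 20 MHz.
 * Requested esc rate 16 MHz: esc_div = DIV_ROUND_UP(60, 16) = 4, so
 * esc_clk = 15 MHz and no bump is needed.
 */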
-
-static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- const unsigned int *reg_values = driver_data->reg_values;
- u32 reg;
-
- if (driver_data->has_freqband)
- return;
-
- /* B D-PHY: D-PHY Master & Slave Analog Block control */
- reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
- reg_values[PHYCTRL_SLEW_UP];
- exynos_dsi_write(dsi, DSIM_PHYCTRL_REG, reg);
-
- /*
- * T LPX: Transmitted length of any Low-Power state period
- * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
- * burst
- */
- reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT];
- exynos_dsi_write(dsi, DSIM_PHYTIMING_REG, reg);
-
- /*
- * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
- * Line state immediately before the HS-0 Line state starting the
- * HS transmission
- * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
- * transmitting the Clock.
- * T CLK-POST: Time that the transmitter continues to send the HS clock
- * after the last associated Data Lane has transitioned to LP mode.
- * The interval is defined as the period from the end of T HS-TRAIL to
- * the beginning of T CLK-TRAIL
- * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
- * the last payload clock bit of a HS transmission burst
- */
- reg = reg_values[PHYTIMING_CLK_PREPARE] |
- reg_values[PHYTIMING_CLK_ZERO] |
- reg_values[PHYTIMING_CLK_POST] |
- reg_values[PHYTIMING_CLK_TRAIL];
-
- exynos_dsi_write(dsi, DSIM_PHYTIMING1_REG, reg);
-
- /*
- * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
- * Line state immediately before the HS-0 Line state starting the
- * HS transmission
- * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
- * transmitting the Sync sequence.
- * T HS-TRAIL: Time that the transmitter drives the flipped differential
- * state after last payload data bit of a HS transmission burst
- */
- reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] |
- reg_values[PHYTIMING_HS_TRAIL];
- exynos_dsi_write(dsi, DSIM_PHYTIMING2_REG, reg);
-}
-
-static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
-{
- u32 reg;
-
- reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
- | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
- exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg);
-
- reg = exynos_dsi_read(dsi, DSIM_PLLCTRL_REG);
- reg &= ~DSIM_PLL_EN;
- exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg);
-}
-
-static void exynos_dsi_enable_lane(struct exynos_dsi *dsi, u32 lane)
-{
- u32 reg = exynos_dsi_read(dsi, DSIM_CONFIG_REG);
- reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK |
- DSIM_LANE_EN(lane));
- exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg);
-}
-
-static int exynos_dsi_init_link(struct exynos_dsi *dsi)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int timeout;
- u32 reg;
- u32 lanes_mask;
-
- /* Initialize FIFO pointers */
- reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG);
- reg &= ~0x1f;
- exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg);
-
- usleep_range(9000, 11000);
-
- reg |= 0x1f;
- exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg);
- usleep_range(9000, 11000);
-
- /* DSI configuration */
- reg = 0;
-
- /*
- * The first bit of mode_flags specifies display configuration.
- * If this bit is set [MIPI_DSI_MODE_VIDEO], the DSI host operates in
- * video mode; otherwise it operates in command mode.
- */
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- reg |= DSIM_VIDEO_MODE;
-
- /*
- * The user manual notes that the following bits are ignored in
- * command mode.
- */
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
- reg |= DSIM_MFLUSH_VS;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- reg |= DSIM_SYNC_INFORM;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
- reg |= DSIM_BURST_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
- reg |= DSIM_AUTO_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
- reg |= DSIM_HSE_DISABLE_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
- reg |= DSIM_HFP_DISABLE_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
- reg |= DSIM_HBP_DISABLE_MODE;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
- reg |= DSIM_HSA_DISABLE_MODE;
- }
-
- if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
- reg |= DSIM_EOT_DISABLE;
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB888:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
- break;
- case MIPI_DSI_FMT_RGB666:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
- break;
- case MIPI_DSI_FMT_RGB565:
- reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
- break;
- default:
- dev_err(dsi->dev, "invalid pixel format\n");
- return -EINVAL;
- }
-
- /*
- * Use non-continuous clock mode if the peripheral requests it and
- * the host controller supports it.
- *
- * In non-continuous clock mode, the host controller turns off
- * the HS clock between high-speed transmissions to reduce
- * power consumption.
- */
- if (driver_data->has_clklane_stop &&
- dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
- reg |= DSIM_CLKLANE_STOP;
- }
- exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg);
-
- lanes_mask = BIT(dsi->lanes) - 1;
- exynos_dsi_enable_lane(dsi, lanes_mask);
-
- /* Check that the clock and data lanes are in the stop state */
- timeout = 100;
- do {
- if (timeout-- == 0) {
- dev_err(dsi->dev, "waiting for bus lanes timed out\n");
- return -EFAULT;
- }
-
- reg = exynos_dsi_read(dsi, DSIM_STATUS_REG);
- if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
- != DSIM_STOP_STATE_DAT(lanes_mask))
- continue;
- } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
-
- reg = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
- reg &= ~DSIM_STOP_STATE_CNT_MASK;
- reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
- exynos_dsi_write(dsi, DSIM_ESCMODE_REG, reg);
-
- reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
- exynos_dsi_write(dsi, DSIM_TIMEOUT_REG, reg);
-
- return 0;
-}
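
The lane mask used above is a dense bitmask of the active data lanes; a quick sketch of the values involved (hand-derived):

/*
 * lanes_mask = BIT(dsi->lanes) - 1: 4 data lanes -> 0xF, 2 lanes -> 0x3.
 * The stop-state poll then waits until DSIM_STATUS_REG reports
 * DSIM_STOP_STATE_DAT(0xF) on all four lanes plus either STOP_STATE_CLK
 * or TX_READY_HS_CLK on the clock lane.
 */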
-
-static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
-{
- struct drm_display_mode *m = &dsi->mode;
- unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
- u32 reg;
-
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- reg = DSIM_CMD_ALLOW(0xf)
- | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
- | DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
- exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg);
-
- reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay)
- | DSIM_MAIN_HBP(m->htotal - m->hsync_end);
- exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg);
-
- reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
- | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start);
- exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg);
- }
- reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) |
- DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol);
-
- exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
-
- dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay);
-}
-
-static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
-{
- u32 reg;
-
- reg = exynos_dsi_read(dsi, DSIM_MDRESOL_REG);
- if (enable)
- reg |= DSIM_MAIN_STAND_BY;
- else
- reg &= ~DSIM_MAIN_STAND_BY;
- exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
-}
-
-static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
-{
- int timeout = 2000;
-
- do {
- u32 reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG);
-
- if (!(reg & DSIM_SFR_HEADER_FULL))
- return 0;
-
- if (!cond_resched())
- usleep_range(950, 1050);
- } while (--timeout);
-
- return -ETIMEDOUT;
-}
-
-static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm)
-{
- u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
-
- if (lpm)
- v |= DSIM_CMD_LPDT_LP;
- else
- v &= ~DSIM_CMD_LPDT_LP;
-
- exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v);
-}
-
-static void exynos_dsi_force_bta(struct exynos_dsi *dsi)
-{
- u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
- v |= DSIM_FORCE_BTA;
- exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v);
-}
-
-static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
-{
- struct device *dev = dsi->dev;
- struct mipi_dsi_packet *pkt = &xfer->packet;
- const u8 *payload = pkt->payload + xfer->tx_done;
- u16 length = pkt->payload_length - xfer->tx_done;
- bool first = !xfer->tx_done;
- u32 reg;
-
- dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
- xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
-
- if (length > DSI_TX_FIFO_SIZE)
- length = DSI_TX_FIFO_SIZE;
-
- xfer->tx_done += length;
-
- /* Send payload */
- while (length >= 4) {
- reg = get_unaligned_le32(payload);
- exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
- payload += 4;
- length -= 4;
- }
-
- reg = 0;
- switch (length) {
- case 3:
- reg |= payload[2] << 16;
- fallthrough;
- case 2:
- reg |= payload[1] << 8;
- fallthrough;
- case 1:
- reg |= payload[0];
- exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
- break;
- }
-
- /* Send packet header */
- if (!first)
- return;
-
- reg = get_unaligned_le32(pkt->header);
- if (exynos_dsi_wait_for_hdr_fifo(dsi)) {
- dev_err(dev, "waiting for header FIFO timed out\n");
- return;
- }
-
- if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
- dsi->state & DSIM_STATE_CMD_LPM)) {
- exynos_dsi_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM);
- dsi->state ^= DSIM_STATE_CMD_LPM;
- }
-
- exynos_dsi_write(dsi, DSIM_PKTHDR_REG, reg);
-
- if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
- exynos_dsi_force_bta(dsi);
-}
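
Payload bytes go into the 32-bit FIFO little-endian, four at a time, with the 1-3 byte tail packed by the switch above; a small sketch of the tail packing (hypothetical helper):

static u32 dsi_pack_tail_le(const u8 *p, u16 len)
{
	u32 reg = 0;

	switch (len) {	/* len is 1..3 */
	case 3:
		reg |= p[2] << 16;
		fallthrough;
	case 2:
		reg |= p[1] << 8;
		fallthrough;
	case 1:
		reg |= p[0];
	}
	/* e.g. a 7-byte payload sends one LE word, then b6<<16 | b5<<8 | b4 */
	return reg;
}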
-
-static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
-{
- u8 *payload = xfer->rx_payload + xfer->rx_done;
- bool first = !xfer->rx_done;
- struct device *dev = dsi->dev;
- u16 length;
- u32 reg;
-
- if (first) {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
-
- switch (reg & 0x3f) {
- case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
- case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
- if (xfer->rx_len >= 2) {
- payload[1] = reg >> 16;
- ++xfer->rx_done;
- }
- fallthrough;
- case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
- case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
- payload[0] = reg >> 8;
- ++xfer->rx_done;
- xfer->rx_len = xfer->rx_done;
- xfer->result = 0;
- goto clear_fifo;
- case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
- dev_err(dev, "DSI Error Report: 0x%04x\n",
- (reg >> 8) & 0xffff);
- xfer->result = 0;
- goto clear_fifo;
- }
-
- length = (reg >> 8) & 0xffff;
- if (length > xfer->rx_len) {
- dev_err(dev,
- "response too long (%u > %u bytes), stripping\n",
- length, xfer->rx_len);
- length = xfer->rx_len;
- } else if (length < xfer->rx_len)
- xfer->rx_len = length;
- }
-
- length = xfer->rx_len - xfer->rx_done;
- xfer->rx_done += length;
-
- /* Receive payload */
- while (length >= 4) {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
- payload[0] = (reg >> 0) & 0xff;
- payload[1] = (reg >> 8) & 0xff;
- payload[2] = (reg >> 16) & 0xff;
- payload[3] = (reg >> 24) & 0xff;
- payload += 4;
- length -= 4;
- }
-
- if (length) {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
- switch (length) {
- case 3:
- payload[2] = (reg >> 16) & 0xff;
- fallthrough;
- case 2:
- payload[1] = (reg >> 8) & 0xff;
- fallthrough;
- case 1:
- payload[0] = reg & 0xff;
- }
- }
-
- if (xfer->rx_done == xfer->rx_len)
- xfer->result = 0;
-
-clear_fifo:
- length = DSI_RX_FIFO_SIZE / 4;
- do {
- reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
- if (reg == DSI_RX_FIFO_EMPTY)
- break;
- } while (--length);
-}
-
-static void exynos_dsi_transfer_start(struct exynos_dsi *dsi)
-{
- unsigned long flags;
- struct exynos_dsi_transfer *xfer;
- bool start = false;
-
-again:
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- if (list_empty(&dsi->transfer_list)) {
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- return;
- }
-
- xfer = list_first_entry(&dsi->transfer_list,
- struct exynos_dsi_transfer, list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (xfer->packet.payload_length &&
- xfer->tx_done == xfer->packet.payload_length)
- /* waiting for RX */
- return;
-
- exynos_dsi_send_to_fifo(dsi, xfer);
-
- if (xfer->packet.payload_length || xfer->rx_len)
- return;
-
- xfer->result = 0;
- complete(&xfer->completed);
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (start)
- goto again;
-}
-
-static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
-{
- struct exynos_dsi_transfer *xfer;
- unsigned long flags;
- bool start = true;
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- if (list_empty(&dsi->transfer_list)) {
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- return false;
- }
-
- xfer = list_first_entry(&dsi->transfer_list,
- struct exynos_dsi_transfer, list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- dev_dbg(dsi->dev,
- "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
- xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
- xfer->rx_done);
-
- if (xfer->tx_done != xfer->packet.payload_length)
- return true;
-
- if (xfer->rx_done != xfer->rx_len)
- exynos_dsi_read_from_fifo(dsi, xfer);
-
- if (xfer->rx_done != xfer->rx_len)
- return true;
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (!xfer->rx_len)
- xfer->result = 0;
- complete(&xfer->completed);
-
- return start;
-}
-
-static void exynos_dsi_remove_transfer(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
+static irqreturn_t exynos_dsi_te_irq_handler(struct samsung_dsim *dsim)
{
- unsigned long flags;
- bool start;
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- if (!list_empty(&dsi->transfer_list) &&
- xfer == list_first_entry(&dsi->transfer_list,
- struct exynos_dsi_transfer, list)) {
- list_del_init(&xfer->list);
- start = !list_empty(&dsi->transfer_list);
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- if (start)
- exynos_dsi_transfer_start(dsi);
- return;
- }
-
- list_del_init(&xfer->list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-}
-
-static int exynos_dsi_transfer(struct exynos_dsi *dsi,
- struct exynos_dsi_transfer *xfer)
-{
- unsigned long flags;
- bool stopped;
-
- xfer->tx_done = 0;
- xfer->rx_done = 0;
- xfer->result = -ETIMEDOUT;
- init_completion(&xfer->completed);
-
- spin_lock_irqsave(&dsi->transfer_lock, flags);
-
- stopped = list_empty(&dsi->transfer_list);
- list_add_tail(&xfer->list, &dsi->transfer_list);
-
- spin_unlock_irqrestore(&dsi->transfer_lock, flags);
-
- if (stopped)
- exynos_dsi_transfer_start(dsi);
-
- wait_for_completion_timeout(&xfer->completed,
- msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
- if (xfer->result == -ETIMEDOUT) {
- struct mipi_dsi_packet *pkt = &xfer->packet;
- exynos_dsi_remove_transfer(dsi, xfer);
- dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header,
- (int)pkt->payload_length, pkt->payload);
- return -ETIMEDOUT;
- }
-
- /* Also covers hardware timeout condition */
- return xfer->result;
-}
-
-static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
-{
- struct exynos_dsi *dsi = dev_id;
- u32 status;
-
- status = exynos_dsi_read(dsi, DSIM_INTSRC_REG);
- if (!status) {
- static unsigned long int j;
- if (printk_timed_ratelimit(&j, 500))
- dev_warn(dsi->dev, "spurious interrupt\n");
- return IRQ_HANDLED;
- }
- exynos_dsi_write(dsi, DSIM_INTSRC_REG, status);
-
- if (status & DSIM_INT_SW_RST_RELEASE) {
- u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
- DSIM_INT_SFR_HDR_FIFO_EMPTY | DSIM_INT_RX_ECC_ERR |
- DSIM_INT_SW_RST_RELEASE);
- exynos_dsi_write(dsi, DSIM_INTMSK_REG, mask);
- complete(&dsi->completed);
- return IRQ_HANDLED;
- }
-
- if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
- DSIM_INT_PLL_STABLE)))
- return IRQ_HANDLED;
-
- if (exynos_dsi_transfer_finish(dsi))
- exynos_dsi_transfer_start(dsi);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
-{
- struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
- if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE)
+ if (dsim->state & DSIM_STATE_VIDOUT_AVAILABLE)
exynos_drm_crtc_te_handler(encoder->crtc);
return IRQ_HANDLED;
}
-static void exynos_dsi_enable_irq(struct exynos_dsi *dsi)
-{
- enable_irq(dsi->irq);
-
- if (dsi->te_gpio)
- enable_irq(gpiod_to_irq(dsi->te_gpio));
-}
-
-static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
-{
- if (dsi->te_gpio)
- disable_irq(gpiod_to_irq(dsi->te_gpio));
-
- disable_irq(dsi->irq);
-}
-
-static int exynos_dsi_init(struct exynos_dsi *dsi)
-{
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
-
- exynos_dsi_reset(dsi);
- exynos_dsi_enable_irq(dsi);
-
- if (driver_data->reg_values[RESET_TYPE] == DSIM_FUNCRST)
- exynos_dsi_enable_lane(dsi, BIT(dsi->lanes) - 1);
-
- exynos_dsi_enable_clock(dsi);
- if (driver_data->wait_for_reset)
- exynos_dsi_wait_for_reset(dsi);
- exynos_dsi_set_phy_ctrl(dsi);
- exynos_dsi_init_link(dsi);
-
- return 0;
-}
-
-static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
- struct device *panel)
-{
- int ret;
- int te_gpio_irq;
-
- dsi->te_gpio = gpiod_get_optional(panel, "te", GPIOD_IN);
- if (!dsi->te_gpio) {
- return 0;
- } else if (IS_ERR(dsi->te_gpio)) {
- dev_err(dsi->dev, "gpio request failed with %ld\n",
- PTR_ERR(dsi->te_gpio));
- return PTR_ERR(dsi->te_gpio);
- }
-
- te_gpio_irq = gpiod_to_irq(dsi->te_gpio);
-
- ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
- IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
- if (ret) {
- dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
- gpiod_put(dsi->te_gpio);
- return ret;
- }
-
- return 0;
-}
-
-static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
-{
- if (dsi->te_gpio) {
- free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
- gpiod_put(dsi->te_gpio);
- }
-}
-
-static void exynos_dsi_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
- int ret;
-
- if (dsi->state & DSIM_STATE_ENABLED)
- return;
-
- ret = pm_runtime_resume_and_get(dsi->dev);
- if (ret < 0) {
- dev_err(dsi->dev, "failed to enable DSI device.\n");
- return;
- }
-
- dsi->state |= DSIM_STATE_ENABLED;
-}
-
-static void exynos_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- exynos_dsi_set_display_mode(dsi);
- exynos_dsi_set_display_enable(dsi, true);
-
- dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
-
- return;
-}
-
-static void exynos_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- if (!(dsi->state & DSIM_STATE_ENABLED))
- return;
-
- dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
-}
-
-static void exynos_dsi_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- exynos_dsi_set_display_enable(dsi, false);
-
- dsi->state &= ~DSIM_STATE_ENABLED;
- pm_runtime_put_sync(dsi->dev);
-}
-
-static void exynos_dsi_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- drm_mode_copy(&dsi->mode, adjusted_mode);
-}
-
-static int exynos_dsi_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct exynos_dsi *dsi = bridge_to_dsi(bridge);
-
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
- flags);
-}
-
-static const struct drm_bridge_funcs exynos_dsi_bridge_funcs = {
- .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
- .atomic_reset = drm_atomic_helper_bridge_reset,
- .atomic_pre_enable = exynos_dsi_atomic_pre_enable,
- .atomic_enable = exynos_dsi_atomic_enable,
- .atomic_disable = exynos_dsi_atomic_disable,
- .atomic_post_disable = exynos_dsi_atomic_post_disable,
- .mode_set = exynos_dsi_mode_set,
- .attach = exynos_dsi_attach,
-};
-
-MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
-
-static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
+static int exynos_dsi_host_attach(struct samsung_dsim *dsim,
struct mipi_dsi_device *device)
{
- struct exynos_dsi *dsi = host_to_dsi(host);
- struct device *dev = dsi->dev;
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
struct drm_device *drm = encoder->dev;
- struct drm_panel *panel;
- int ret;
-
- panel = of_drm_find_panel(device->dev.of_node);
- if (!IS_ERR(panel)) {
- dsi->out_bridge = devm_drm_panel_bridge_add(dev, panel);
- } else {
- dsi->out_bridge = of_drm_find_bridge(device->dev.of_node);
- if (!dsi->out_bridge)
- dsi->out_bridge = ERR_PTR(-EINVAL);
- }
-
- if (IS_ERR(dsi->out_bridge)) {
- ret = PTR_ERR(dsi->out_bridge);
- DRM_DEV_ERROR(dev, "failed to find the bridge: %d\n", ret);
- return ret;
- }
-
- DRM_DEV_INFO(dev, "Attached %s device\n", device->name);
- drm_bridge_add(&dsi->bridge);
-
- drm_bridge_attach(encoder, &dsi->bridge,
+ drm_bridge_attach(encoder, &dsim->bridge,
list_first_entry_or_null(&encoder->bridge_chain,
struct drm_bridge,
chain_node), 0);
- /*
- * This is a temporary solution and should be handled in a more generic way.
- *
- * If the attached panel device operates in command mode, the DSI host
- * should register a TE interrupt handler.
- */
- if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
- ret = exynos_dsi_register_te_irq(dsi, &device->dev);
- if (ret)
- return ret;
- }
-
mutex_lock(&drm->mode_config.mutex);
- dsi->lanes = device->lanes;
- dsi->format = device->format;
- dsi->mode_flags = device->mode_flags;
+ dsim->lanes = device->lanes;
+ dsim->format = device->format;
+ dsim->mode_flags = device->mode_flags;
exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode =
- !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO);
+ !(dsim->mode_flags & MIPI_DSI_MODE_VIDEO);
mutex_unlock(&drm->mode_config.mutex);
@@ -1525,100 +60,20 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
-static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
+static void exynos_dsi_host_detach(struct samsung_dsim *dsim,
+ struct mipi_dsi_device *device)
{
- struct exynos_dsi *dsi = host_to_dsi(host);
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_device *drm = dsi->encoder.dev;
- if (dsi->out_bridge->funcs->detach)
- dsi->out_bridge->funcs->detach(dsi->out_bridge);
- dsi->out_bridge = NULL;
-
if (drm->mode_config.poll_enabled)
drm_kms_helper_hotplug_event(drm);
-
- exynos_dsi_unregister_te_irq(dsi);
-
- drm_bridge_remove(&dsi->bridge);
-
- return 0;
}
-static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
- const struct mipi_dsi_msg *msg)
+static int exynos_dsi_bind(struct device *dev, struct device *master, void *data)
{
- struct exynos_dsi *dsi = host_to_dsi(host);
- struct exynos_dsi_transfer xfer;
- int ret;
-
- if (!(dsi->state & DSIM_STATE_ENABLED))
- return -EINVAL;
-
- if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
- ret = exynos_dsi_init(dsi);
- if (ret)
- return ret;
- dsi->state |= DSIM_STATE_INITIALIZED;
- }
-
- ret = mipi_dsi_create_packet(&xfer.packet, msg);
- if (ret < 0)
- return ret;
-
- xfer.rx_len = msg->rx_len;
- xfer.rx_payload = msg->rx_buf;
- xfer.flags = msg->flags;
-
- ret = exynos_dsi_transfer(dsi, &xfer);
- return (ret < 0) ? ret : xfer.rx_done;
-}
-
-static const struct mipi_dsi_host_ops exynos_dsi_ops = {
- .attach = exynos_dsi_host_attach,
- .detach = exynos_dsi_host_detach,
- .transfer = exynos_dsi_host_transfer,
-};
-
-static int exynos_dsi_of_read_u32(const struct device_node *np,
- const char *propname, u32 *out_value)
-{
- int ret = of_property_read_u32(np, propname, out_value);
-
- if (ret < 0)
- pr_err("%pOF: failed to get '%s' property\n", np, propname);
-
- return ret;
-}
-
-static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
-{
- struct device *dev = dsi->dev;
- struct device_node *node = dev->of_node;
- int ret;
-
- ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
- &dsi->pll_clk_rate);
- if (ret < 0)
- return ret;
-
- ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
- &dsi->burst_clk_rate);
- if (ret < 0)
- return ret;
-
- ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
- &dsi->esc_clk_rate);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int exynos_dsi_bind(struct device *dev, struct device *master,
- void *data)
-{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
+ struct samsung_dsim *dsim = dev_get_drvdata(dev);
+ struct exynos_dsi *dsi = dsim->priv;
struct drm_encoder *encoder = &dsi->encoder;
struct drm_device *drm_dev = data;
int ret;
@@ -1629,17 +84,16 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
if (ret < 0)
return ret;
- return mipi_dsi_host_register(&dsi->dsi_host);
+ return mipi_dsi_host_register(&dsim->dsi_host);
}
-static void exynos_dsi_unbind(struct device *dev, struct device *master,
- void *data)
+static void exynos_dsi_unbind(struct device *dev, struct device *master, void *data)
{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
+ struct samsung_dsim *dsim = dev_get_drvdata(dev);
- exynos_dsi_atomic_disable(&dsi->bridge, NULL);
+ dsim->bridge.funcs->atomic_disable(&dsim->bridge, NULL);
- mipi_dsi_host_unregister(&dsi->dsi_host);
+ mipi_dsi_host_unregister(&dsim->dsi_host);
}
static const struct component_ops exynos_dsi_component_ops = {
@@ -1647,189 +101,90 @@ static const struct component_ops exynos_dsi_component_ops = {
.unbind = exynos_dsi_unbind,
};
-static int exynos_dsi_probe(struct platform_device *pdev)
+static int exynos_dsi_register_host(struct samsung_dsim *dsim)
{
- struct device *dev = &pdev->dev;
struct exynos_dsi *dsi;
- int ret, i;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ dsi = devm_kzalloc(dsim->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
- init_completion(&dsi->completed);
- spin_lock_init(&dsi->transfer_lock);
- INIT_LIST_HEAD(&dsi->transfer_list);
-
- dsi->dsi_host.ops = &exynos_dsi_ops;
- dsi->dsi_host.dev = dev;
+ dsim->priv = dsi;
+ dsim->bridge.pre_enable_prev_first = true;
- dsi->dev = dev;
- dsi->driver_data = of_device_get_match_data(dev);
-
- dsi->supplies[0].supply = "vddcore";
- dsi->supplies[1].supply = "vddio";
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
- dsi->supplies);
- if (ret)
- return dev_err_probe(dev, ret, "failed to get regulators\n");
-
- dsi->clks = devm_kcalloc(dev,
- dsi->driver_data->num_clks, sizeof(*dsi->clks),
- GFP_KERNEL);
- if (!dsi->clks)
- return -ENOMEM;
-
- for (i = 0; i < dsi->driver_data->num_clks; i++) {
- dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
- if (IS_ERR(dsi->clks[i])) {
- if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- dsi->clks[i] = devm_clk_get(dev,
- OLD_SCLK_MIPI_CLK_NAME);
- if (!IS_ERR(dsi->clks[i]))
- continue;
- }
-
- dev_info(dev, "failed to get the clock: %s\n",
- clk_names[i]);
- return PTR_ERR(dsi->clks[i]);
- }
- }
-
- dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(dsi->reg_base))
- return PTR_ERR(dsi->reg_base);
-
- dsi->phy = devm_phy_get(dev, "dsim");
- if (IS_ERR(dsi->phy)) {
- dev_info(dev, "failed to get dsim phy\n");
- return PTR_ERR(dsi->phy);
- }
-
- dsi->irq = platform_get_irq(pdev, 0);
- if (dsi->irq < 0)
- return dsi->irq;
-
- ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
- exynos_dsi_irq,
- IRQF_ONESHOT | IRQF_NO_AUTOEN,
- dev_name(dev), dsi);
- if (ret) {
- dev_err(dev, "failed to request dsi irq\n");
- return ret;
- }
-
- ret = exynos_dsi_parse_dt(dsi);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, dsi);
-
- pm_runtime_enable(dev);
-
- dsi->bridge.funcs = &exynos_dsi_bridge_funcs;
- dsi->bridge.of_node = dev->of_node;
- dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
- dsi->bridge.pre_enable_prev_first = true;
-
- ret = component_add(dev, &exynos_dsi_component_ops);
- if (ret)
- goto err_disable_runtime;
-
- return 0;
-
-err_disable_runtime:
- pm_runtime_disable(dev);
-
- return ret;
+ return component_add(dsim->dev, &exynos_dsi_component_ops);
}
-static int exynos_dsi_remove(struct platform_device *pdev)
+static void exynos_dsi_unregister_host(struct samsung_dsim *dsim)
{
- pm_runtime_disable(&pdev->dev);
-
- component_del(&pdev->dev, &exynos_dsi_component_ops);
-
- return 0;
-}
-
-static int __maybe_unused exynos_dsi_suspend(struct device *dev)
-{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int ret, i;
-
- usleep_range(10000, 20000);
-
- if (dsi->state & DSIM_STATE_INITIALIZED) {
- dsi->state &= ~DSIM_STATE_INITIALIZED;
-
- exynos_dsi_disable_clock(dsi);
-
- exynos_dsi_disable_irq(dsi);
- }
-
- dsi->state &= ~DSIM_STATE_CMD_LPM;
-
- phy_power_off(dsi->phy);
-
- for (i = driver_data->num_clks - 1; i > -1; i--)
- clk_disable_unprepare(dsi->clks[i]);
-
- ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
- if (ret < 0)
- dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
-
- return 0;
+ component_del(dsim->dev, &exynos_dsi_component_ops);
}
-static int __maybe_unused exynos_dsi_resume(struct device *dev)
-{
- struct exynos_dsi *dsi = dev_get_drvdata(dev);
- const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- int ret, i;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
- if (ret < 0) {
- dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
- return ret;
- }
+static const struct samsung_dsim_host_ops exynos_dsi_exynos_host_ops = {
+ .register_host = exynos_dsi_register_host,
+ .unregister_host = exynos_dsi_unregister_host,
+ .attach = exynos_dsi_host_attach,
+ .detach = exynos_dsi_host_detach,
+ .te_irq_handler = exynos_dsi_te_irq_handler,
+};
- for (i = 0; i < driver_data->num_clks; i++) {
- ret = clk_prepare_enable(dsi->clks[i]);
- if (ret < 0)
- goto err_clk;
- }
+static const struct samsung_dsim_plat_data exynos3250_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS3250,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
- ret = phy_power_on(dsi->phy);
- if (ret < 0) {
- dev_err(dsi->dev, "cannot enable phy %d\n", ret);
- goto err_clk;
- }
+static const struct samsung_dsim_plat_data exynos4210_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS4210,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
- return 0;
+static const struct samsung_dsim_plat_data exynos5410_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS5410,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
-err_clk:
- while (--i > -1)
- clk_disable_unprepare(dsi->clks[i]);
- regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+static const struct samsung_dsim_plat_data exynos5422_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS5422,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
- return ret;
-}
+static const struct samsung_dsim_plat_data exynos5433_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS5433,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
-static const struct dev_pm_ops exynos_dsi_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+static const struct of_device_id exynos_dsi_of_match[] = {
+ {
+ .compatible = "samsung,exynos3250-mipi-dsi",
+ .data = &exynos3250_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos4210-mipi-dsi",
+ .data = &exynos4210_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos5410-mipi-dsi",
+ .data = &exynos5410_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos5422-mipi-dsi",
+ .data = &exynos5422_dsi_pdata,
+ },
+ {
+ .compatible = "samsung,exynos5433-mipi-dsi",
+ .data = &exynos5433_dsi_pdata,
+ },
+ { /* sentinel. */ }
};
+MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
struct platform_driver dsi_driver = {
- .probe = exynos_dsi_probe,
- .remove = exynos_dsi_remove,
+ .probe = samsung_dsim_probe,
+ .remove = samsung_dsim_remove,
.driver = {
.name = "exynos-dsi",
.owner = THIS_MODULE,
- .pm = &exynos_dsi_pm_ops,
+ .pm = &samsung_dsim_pm_ops,
.of_match_table = exynos_dsi_of_match,
},
};
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 8579c7629f5e..c09ba019ba5e 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -20,7 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
@@ -333,7 +333,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
if (ret < 0)
goto put;
- drm_fbdev_generic_setup(drm, legacyfb_depth);
+ drm_fbdev_dma_setup(drm, legacyfb_depth);
return 0;
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 63012bf2485a..4f302cd5e1a6 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -38,5 +38,6 @@ gma500_gfx-y += \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o
+gma500_gfx-$(CONFIG_DRM_FBDEV_EMULATION) += fbdev.o
obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
new file mode 100644
index 000000000000..62287407e717
--- /dev/null
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ **************************************************************************/
+
+#include <linux/pfn_t.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_framebuffer.h>
+
+#include "gem.h"
+#include "psb_drv.h"
+
+/*
+ * VM area struct
+ */
+
+static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct fb_info *info = vma->vm_private_data;
+ unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
+ unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
+ vm_fault_t err = VM_FAULT_SIGBUS;
+ unsigned long page_num = vma_pages(vma);
+ unsigned long i;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ for (i = 0; i < page_num; ++i) {
+ err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ if (unlikely(err & VM_FAULT_ERROR))
+ break;
+ address += PAGE_SIZE;
+ ++pfn;
+ }
+
+ return err;
+}
+
+static const struct vm_operations_struct psb_fbdev_vm_ops = {
+ .fault = psb_fbdev_vm_fault,
+};
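+
Because psb_fbdev_fb_mmap() below rejects any non-zero pgoff, the fault handler can recover the VMA base as vmf->address - (vmf->pgoff << PAGE_SHIFT) and pre-fill the whole mapping on the first fault; a hand-checked sketch of the address math (hypothetical numbers):

/*
 * vm_start = 0x7f0000000000 with a first fault at 0x7f0000003000 has
 * pgoff = 3, so address = 0x7f0000003000 - (3 << PAGE_SHIFT)
 *                       = 0x7f0000000000 = vm_start,
 * and the loop inserts vma_pages(vma) consecutive pfns starting at
 * info->fix.smem_start >> PAGE_SHIFT.
 */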
+
+/*
+ * struct fb_ops
+ */
+
+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
+
+static int psb_fbdev_fb_setcolreg(unsigned int regno,
+ unsigned int red, unsigned int green,
+ unsigned int blue, unsigned int transp,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ uint32_t v;
+
+ if (!fb)
+ return -ENOMEM;
+
+ if (regno > 255)
+ return 1;
+
+ red = CMAP_TOHW(red, info->var.red.length);
+ blue = CMAP_TOHW(blue, info->var.blue.length);
+ green = CMAP_TOHW(green, info->var.green.length);
+ transp = CMAP_TOHW(transp, info->var.transp.length);
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+
+ if (regno < 16) {
+ switch (fb->format->cpp[0] * 8) {
+ case 16:
+ ((uint32_t *) info->pseudo_palette)[regno] = v;
+ break;
+ case 24:
+ case 32:
+ ((uint32_t *) info->pseudo_palette)[regno] = v;
+ break;
+ }
+ }
+
+ return 0;
+}
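+
CMAP_TOHW() rescales a 16-bit fbdev colour component to the width of the hardware field, with rounding; two hand-checked examples:

/*
 * CMAP_TOHW(0xFFFF, 5) = ((0xFFFF << 5) + 0x7FFF - 0xFFFF) >> 16 = 0x1F,
 * so full scale maps to the top of a 5-bit field (e.g. RGB565 red/blue).
 * CMAP_TOHW(0x8000, 5) = 15, i.e. mid scale lands mid field.
 */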
+
+static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+
+ /*
+ * If this is a GEM object then info->screen_base is the virtual
+ * kernel remapping of the object. FIXME: Review if this is
+ * suitable for our mmap work
+ */
+ vma->vm_ops = &psb_fbdev_vm_ops;
+ vma->vm_private_data = info;
+ vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
+ return 0;
+}
+
+static void psb_fbdev_fb_destroy(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_gem_object *obj = fb->obj[0];
+
+ drm_fb_helper_fini(fb_helper);
+
+ drm_framebuffer_unregister_private(fb);
+ fb->obj[0] = NULL;
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+
+ drm_gem_object_put(obj);
+
+ drm_client_release(&fb_helper->client);
+
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+}
+
+static const struct fb_ops psb_fbdev_fb_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_setcolreg = psb_fbdev_fb_setcolreg,
+ .fb_read = drm_fb_helper_cfb_read,
+ .fb_write = drm_fb_helper_cfb_write,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
+ .fb_mmap = psb_fbdev_fb_mmap,
+ .fb_destroy = psb_fbdev_fb_destroy,
+};
+
+/*
+ * struct drm_fb_helper_funcs
+ */
+
+static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd = { };
+ int size;
+ int ret;
+ struct psb_gem_object *backing;
+ struct drm_gem_object *obj;
+ u32 bpp, depth;
+
+ /* No 24-bit packed mode */
+ if (sizes->surface_bpp == 24) {
+ sizes->surface_bpp = 32;
+ sizes->surface_depth = 24;
+ }
+ bpp = sizes->surface_bpp;
+ depth = sizes->surface_depth;
+
+ /*
+ * If a 32-bpp framebuffer does not fit in stolen memory, switch to
+ * 16 bpp to get a console at full resolution. The X mode-setting
+ * server will allocate its own 32-bpp GEM framebuffer.
+ */
+ size = ALIGN(sizes->surface_width * DIV_ROUND_UP(bpp, 8), 64) *
+ sizes->surface_height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ if (size > dev_priv->vram_stolen_size) {
+ sizes->surface_bpp = 16;
+ sizes->surface_depth = 16;
+ }
+ bpp = sizes->surface_bpp;
+ depth = sizes->surface_depth;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ /* Allocate the framebuffer in the GTT with stolen page backing */
+ backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
+ if (IS_ERR(backing))
+ return PTR_ERR(backing);
+ obj = &backing->base;
+
+ fb = psb_framebuffer_create(dev, &mode_cmd, obj);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto err_drm_gem_object_put;
+ }
+
+ fb_helper->fb = fb;
+
+ info = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_drm_framebuffer_unregister_private;
+ }
+
+ info->fbops = &psb_fbdev_fb_ops;
+ info->flags = FBINFO_DEFAULT;
+ /* Access the stolen memory directly */
+ info->screen_base = dev_priv->vram_addr + backing->offset;
+ info->screen_size = size;
+
+ drm_fb_helper_fill_info(info, fb_helper, sizes);
+
+ info->fix.smem_start = dev_priv->stolen_base + backing->offset;
+ info->fix.smem_len = size;
+ info->fix.ywrapstep = 0;
+ info->fix.ypanstep = 0;
+ info->fix.mmio_start = pci_resource_start(pdev, 0);
+ info->fix.mmio_len = pci_resource_len(pdev, 0);
+
+ memset(info->screen_base, 0, info->screen_size);
+
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+ dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
+
+ return 0;
+
+err_drm_framebuffer_unregister_private:
+ drm_framebuffer_unregister_private(fb);
+ fb->obj[0] = NULL;
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+err_drm_gem_object_put:
+ drm_gem_object_put(obj);
+ return ret;
+}
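+
The size math above pads each scanline to a 64-byte boundary and rounds the whole buffer to a page; a hand-checked example (assumed mode, not from the driver):

/*
 * 1024x768 at 32 bpp: pitch = ALIGN(1024 * 4, 64) = 4096 bytes,
 * size = ALIGN(4096 * 768, PAGE_SIZE) = 3 MiB. If that exceeds
 * vram_stolen_size, the probe retries the same mode at 16 bpp,
 * halving both pitch and size.
 */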
+
+static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = {
+ .fb_probe = psb_fbdev_fb_probe,
+};
+
+/*
+ * struct drm_client_funcs and setup code
+ */
+
+static void psb_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->info) {
+ drm_fb_helper_unregister_info(fb_helper);
+ } else {
+ drm_fb_helper_unprepare(fb_helper);
+ drm_client_release(&fb_helper->client);
+ kfree(fb_helper);
+ }
+}
+
+static int psb_fbdev_client_restore(struct drm_client_dev *client)
+{
+ drm_fb_helper_lastclose(client->dev);
+
+ return 0;
+}
+
+static int psb_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
+
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err_drm_err;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(fb_helper);
+ if (ret)
+ goto err_drm_fb_helper_fini;
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
+ drm_err(dev, "Failed to setup gma500 fbdev emulation (ret=%d)\n", ret);
+ return ret;
+}
+
+static const struct drm_client_funcs psb_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = psb_fbdev_client_unregister,
+ .restore = psb_fbdev_client_restore,
+ .hotplug = psb_fbdev_client_hotplug,
+};
+
+void psb_fbdev_setup(struct drm_psb_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->dev;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return;
+ drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fbdev_fb_helper_funcs);
+
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev-gma500", &psb_fbdev_client_funcs);
+ if (ret) {
+ drm_err(dev, "Failed to register client: %d\n", ret);
+ goto err_drm_fb_helper_unprepare;
+ }
+
+ ret = psb_fbdev_client_hotplug(&fb_helper->client);
+ if (ret)
+ drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+
+ drm_client_register(&fb_helper->client);
+
+ return;
+
+err_drm_fb_helper_unprepare:
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 50611eb7f134..1a374702b696 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -5,158 +5,18 @@
*
**************************************************************************/
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pfn_t.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-
-#include <drm/drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include "framebuffer.h"
-#include "gem.h"
#include "psb_drv.h"
-#include "psb_intel_drv.h"
-#include "psb_intel_reg.h"
static const struct drm_framebuffer_funcs psb_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
-#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
-
-static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- uint32_t v;
-
- if (!fb)
- return -ENOMEM;
-
- if (regno > 255)
- return 1;
-
- red = CMAP_TOHW(red, info->var.red.length);
- blue = CMAP_TOHW(blue, info->var.blue.length);
- green = CMAP_TOHW(green, info->var.green.length);
- transp = CMAP_TOHW(transp, info->var.transp.length);
-
- v = (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset) |
- (transp << info->var.transp.offset);
-
- if (regno < 16) {
- switch (fb->format->cpp[0] * 8) {
- case 16:
- ((uint32_t *) info->pseudo_palette)[regno] = v;
- break;
- case 24:
- case 32:
- ((uint32_t *) info->pseudo_palette)[regno] = v;
- break;
- }
- }
-
- return 0;
-}
-
-static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_framebuffer *fb = vma->vm_private_data;
- struct drm_device *dev = fb->dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct psb_gem_object *pobj = to_psb_gem_object(fb->obj[0]);
- int page_num;
- int i;
- unsigned long address;
- vm_fault_t ret = VM_FAULT_SIGBUS;
- unsigned long pfn;
- unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + pobj->offset;
-
- page_num = vma_pages(vma);
- address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- for (i = 0; i < page_num; i++) {
- pfn = (phys_addr >> PAGE_SHIFT);
-
- ret = vmf_insert_mixed(vma, address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
- if (unlikely(ret & VM_FAULT_ERROR))
- break;
- address += PAGE_SIZE;
- phys_addr += PAGE_SIZE;
- }
- return ret;
-}
-
-static void psbfb_vm_open(struct vm_area_struct *vma)
-{
-}
-
-static void psbfb_vm_close(struct vm_area_struct *vma)
-{
-}
-
-static const struct vm_operations_struct psbfb_vm_ops = {
- .fault = psbfb_vm_fault,
- .open = psbfb_vm_open,
- .close = psbfb_vm_close
-};
-
-static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
-
- if (vma->vm_pgoff != 0)
- return -EINVAL;
- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
- return -EINVAL;
-
- /*
- * If this is a GEM object then info->screen_base is the virtual
- * kernel remapping of the object. FIXME: Review if this is
- * suitable for our mmap work
- */
- vma->vm_ops = &psbfb_vm_ops;
- vma->vm_private_data = (void *)fb;
- vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
- return 0;
-}
-
-static const struct fb_ops psbfb_unaccel_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_setcolreg = psbfb_setcolreg,
- .fb_read = drm_fb_helper_cfb_read,
- .fb_write = drm_fb_helper_cfb_write,
- .fb_fillrect = drm_fb_helper_cfb_fillrect,
- .fb_copyarea = drm_fb_helper_cfb_copyarea,
- .fb_imageblit = drm_fb_helper_cfb_imageblit,
- .fb_mmap = psbfb_mmap,
-};
-
/**
* psb_framebuffer_init - initialize a framebuffer
* @dev: our DRM device
@@ -207,11 +67,9 @@ static int psb_framebuffer_init(struct drm_device *dev,
*
* TODO: review object references
*/
-
-static struct drm_framebuffer *psb_framebuffer_create
- (struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
+struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
{
struct drm_framebuffer *fb;
int ret;
@@ -229,93 +87,6 @@ static struct drm_framebuffer *psb_framebuffer_create
}
/**
- * psbfb_create - create a framebuffer
- * @fb_helper: the framebuffer helper
- * @sizes: specification of the layout
- *
- * Create a framebuffer to the specifications provided
- */
-static int psbfb_create(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd;
- int size;
- int ret;
- struct psb_gem_object *backing;
- struct drm_gem_object *obj;
- u32 bpp, depth;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- bpp = sizes->surface_bpp;
- depth = sizes->surface_depth;
-
- /* No 24bit packed */
- if (bpp == 24)
- bpp = 32;
-
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
-
- /* Allocate the framebuffer in the GTT with stolen page backing */
- backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
- if (IS_ERR(backing))
- return PTR_ERR(backing);
- obj = &backing->base;
-
- memset(dev_priv->vram_addr + backing->offset, 0, size);
-
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_drm_gem_object_put;
- }
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
-
- fb = psb_framebuffer_create(dev, &mode_cmd, obj);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
- goto err_drm_gem_object_put;
- }
-
- fb_helper->fb = fb;
-
- info->fbops = &psbfb_unaccel_ops;
-
- info->fix.smem_start = dev_priv->fb_base;
- info->fix.smem_len = size;
- info->fix.ywrapstep = 0;
- info->fix.ypanstep = 0;
-
- /* Accessed stolen memory directly */
- info->screen_base = dev_priv->vram_addr + backing->offset;
- info->screen_size = size;
-
- drm_fb_helper_fill_info(info, fb_helper, sizes);
-
- info->fix.mmio_start = pci_resource_start(pdev, 0);
- info->fix.mmio_len = pci_resource_len(pdev, 0);
-
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
- dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
-
- return 0;
-
-err_drm_gem_object_put:
- drm_gem_object_put(obj);
- return ret;
-}
-
-/**
* psb_user_framebuffer_create - create framebuffer
* @dev: our DRM device
* @filp: client file
@@ -346,108 +117,8 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return fb;
}
-static int psbfb_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct drm_device *dev = fb_helper->dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- unsigned int fb_size;
- int bytespp;
-
- bytespp = sizes->surface_bpp / 8;
- if (bytespp == 3) /* no 24bit packed */
- bytespp = 4;
-
- /* If the mode will not fit in 32bit then switch to 16bit to get
- a console on full resolution. The X mode setting server will
- allocate its own 32bit GEM framebuffer */
- fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
- sizes->surface_height;
- fb_size = ALIGN(fb_size, PAGE_SIZE);
-
- if (fb_size > dev_priv->vram_stolen_size) {
- sizes->surface_bpp = 16;
- sizes->surface_depth = 16;
- }
-
- return psbfb_create(fb_helper, sizes);
-}
-
-static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
- .fb_probe = psbfb_probe,
-};
-
-static int psb_fbdev_destroy(struct drm_device *dev,
- struct drm_fb_helper *fb_helper)
-{
- struct drm_framebuffer *fb = fb_helper->fb;
-
- drm_fb_helper_unregister_info(fb_helper);
-
- drm_fb_helper_fini(fb_helper);
- drm_framebuffer_unregister_private(fb);
- drm_framebuffer_cleanup(fb);
-
- if (fb->obj[0])
- drm_gem_object_put(fb->obj[0]);
- kfree(fb);
-
- return 0;
-}
-
-int psb_fbdev_init(struct drm_device *dev)
-{
- struct drm_fb_helper *fb_helper;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int ret;
-
- fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper) {
- dev_err(dev->dev, "no memory\n");
- return -ENOMEM;
- }
-
- dev_priv->fb_helper = fb_helper;
-
- drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fb_helper_funcs);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto free;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto fini;
-
- return 0;
-
-fini:
- drm_fb_helper_fini(fb_helper);
-free:
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- return ret;
-}
-
-static void psb_fbdev_fini(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-
- if (!dev_priv->fb_helper)
- return;
-
- psb_fbdev_destroy(dev, dev_priv->fb_helper);
- drm_fb_helper_unprepare(dev_priv->fb_helper);
- kfree(dev_priv->fb_helper);
- dev_priv->fb_helper = NULL;
-}
-
static const struct drm_mode_config_funcs psb_mode_funcs = {
.fb_create = psb_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
};
static void psb_setup_outputs(struct drm_device *dev)
@@ -515,7 +186,6 @@ void psb_modeset_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
int i;
if (drmm_mode_config_init(dev))
@@ -526,10 +196,6 @@ void psb_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &psb_mode_funcs;
- /* set memory base */
- /* Oaktrail and Poulsbo should use BAR 2*/
- pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev_priv->fb_base));
-
/* num pipes is 2 for PSB but 1 for Mrst */
for (i = 0; i < dev_priv->num_pipe; i++)
psb_intel_crtc_init(dev, i, mode_dev);
@@ -550,6 +216,5 @@ void psb_modeset_cleanup(struct drm_device *dev)
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->modeset) {
drm_kms_helper_poll_fini(dev);
- psb_fbdev_fini(dev);
}
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index cd9c73f5a64a..2ce96b1b9c74 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -21,7 +21,6 @@
#include <drm/drm.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pciids.h>
@@ -387,7 +386,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
psb_modeset_init(dev);
- psb_fbdev_init(dev);
drm_kms_helper_poll_init(dev);
/* Only add backlight support if we have LVDS or MIPI output */
@@ -452,6 +450,8 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ psb_fbdev_setup(dev_priv);
+
return 0;
}
@@ -477,7 +477,6 @@ static const struct file_operations psb_gem_fops = {
static const struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
- .lastclose = drm_fb_helper_lastclose,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index a5df6d2f2cab..f7f709df99b4 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -193,8 +193,6 @@
#define KSEL_BYPASS_25 6
#define KSEL_BYPASS_83_100 7
-struct drm_fb_helper;
-
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -522,9 +520,6 @@ struct drm_psb_private {
uint32_t blc_adj1;
uint32_t blc_adj2;
- struct drm_fb_helper *fb_helper;
- resource_size_t fb_base;
-
bool dsr_enable;
u32 dsr_fb_update;
bool dpi_panel_on[3];
@@ -610,7 +605,19 @@ extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);
-extern int psb_fbdev_init(struct drm_device *dev);
+
+/* framebuffer */
+struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+/* fbdev */
+#if defined(CONFIG_DRM_FBDEV_EMULATION)
+void psb_fbdev_setup(struct drm_psb_private *dev_priv);
+#else
+static inline void psb_fbdev_setup(struct drm_psb_private *dev_priv)
+{ }
+#endif
/* backlight.c */
int gma_backlight_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index d421031462df..343c51250207 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -32,17 +32,6 @@ static inline u32 gma_pipestat(int pipe)
BUG();
}
-static inline u32 gma_pipe_event(int pipe)
-{
- if (pipe == 0)
- return _PSB_PIPEA_EVENT_FLAG;
- if (pipe == 1)
- return _MDFLD_PIPEB_EVENT_FLAG;
- if (pipe == 2)
- return _MDFLD_PIPEC_EVENT_FLAG;
- BUG();
-}
-
static inline u32 gma_pipeconf(int pipe)
{
if (pipe == 0)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 918470a04591..057ef22fa9c6 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -195,6 +195,7 @@ i915-y += \
i915-y += \
gt/uc/intel_gsc_fw.o \
gt/uc/intel_gsc_uc.o \
+ gt/uc/intel_gsc_uc_heci_cmd_submit.o \
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_capture.o \
@@ -239,6 +240,7 @@ i915-y += \
display/intel_display_power.o \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
+ display/intel_display_rps.o \
display/intel_dmc.o \
display/intel_dpio_phy.o \
display/intel_dpll.o \
@@ -254,6 +256,7 @@ i915-y += \
display/intel_frontbuffer.o \
display/intel_global_state.o \
display/intel_hdcp.o \
+ display/intel_hdcp_gsc.o \
display/intel_hotplug.o \
display/intel_hti.o \
display/intel_lpe_audio.o \
@@ -266,10 +269,13 @@ i915-y += \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
+ display/intel_sprite_uapi.o \
display/intel_tc.o \
display/intel_vblank.o \
display/intel_vga.o \
+ display/intel_wm.o \
display/i9xx_plane.o \
+ display/i9xx_wm.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
display/skl_watermark.o
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index fa754038d669..920d570f7594 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -17,6 +17,7 @@
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
@@ -136,16 +137,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp;
-
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- trans_dp |= TRANS_DP_ENH_FRAMING;
- else
- trans_dp &= ~TRANS_DP_ENH_FRAMING;
- intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
+ intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe),
+ TRANS_DP_ENH_FRAMING,
+ drm_dp_enhanced_frame_cap(intel_dp->dpcd) ?
+ TRANS_DP_ENH_FRAMING : 0);
} else {
if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
@@ -1200,29 +1197,6 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
-static bool gm45_digital_port_connected(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit;
-
- switch (encoder->hpd_pin) {
- case HPD_PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
- break;
- case HPD_PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
- break;
- case HPD_PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
- break;
- default:
- MISSING_CASE(encoder->hpd_pin);
- return false;
- }
-
- return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
-}
-
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1279,11 +1253,19 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
bool g4x_dp_init(struct drm_i915_private *dev_priv,
i915_reg_t output_reg, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
+ devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+
+ /* FIXME bail? */
+ if (!devdata)
+ drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n",
+ port_name(port));
+
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
return false;
@@ -1295,6 +1277,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder = &dig_port->base;
encoder = &intel_encoder->base;
+ intel_encoder->devdata = devdata;
+
mutex_init(&dig_port->hdcp_mutex);
if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -1377,10 +1361,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (HAS_GMCH(dev_priv)) {
- if (IS_GM45(dev_priv))
- dig_port->connected = gm45_digital_port_connected;
- else
- dig_port->connected = g4x_digital_port_connected;
+ dig_port->connected = g4x_digital_port_connected;
} else {
if (port == PORT_A)
dig_port->connected = ilk_digital_port_connected;
@@ -1391,7 +1372,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
if (port != PORT_A)
intel_infoframe_init(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
if (!intel_dp_init_connector(dig_port, intel_connector))
goto err_init_connector;
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 64c3b3990702..448ea26786e0 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
@@ -273,8 +274,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
*/
if (pipe_config->pipe_bpp > 24) {
- intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
- intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ 0, TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= SDVO_COLOR_FORMAT_8bpc;
@@ -290,8 +291,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
- intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0);
}
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
@@ -548,10 +549,18 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
void g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
+ devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+
+ /* FIXME bail? */
+ if (!devdata)
+ drm_dbg_kms(&dev_priv->drm, "No VBT child device for HDMI-%c\n",
+ port_name(port));
+
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
return;
@@ -564,6 +573,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder = &dig_port->base;
+ intel_encoder->devdata = devdata;
+
mutex_init(&dig_port->hdcp_mutex);
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
@@ -629,6 +640,6 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
intel_hdmi_init_connector(dig_port, intel_connector);
}
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 83aa3800245f..2910f5d0f3e2 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -267,3 +267,40 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
crtc_state->ips_enabled = true;
}
}
+
+static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_IPS(i915))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ str_yes_no(i915->params.enable_ips));
+
+ if (DISPLAY_VER(i915) >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hsw_ips_debugfs_status);
+
+void hsw_ips_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_ips_status", 0444, minor->debugfs_root,
+ i915, &hsw_ips_debugfs_status_fops);
+}
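+
+/*
+ * Once registered, the IPS state can be inspected from userspace via
+ * debugfs, e.g. (path assumed for a card at minor 0):
+ *
+ *   cat /sys/kernel/debug/dri/0/i915_ips_status
+ */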
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h
index 4564dee497d7..7ed6061874f7 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.h
+++ b/drivers/gpu/drm/i915/display/hsw_ips.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -22,5 +23,6 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
+void hsw_ips_debugfs_register(struct drm_i915_private *i915);
#endif /* __HSW_IPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
new file mode 100644
index 000000000000..caef72d38798
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -0,0 +1,4047 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i9xx_wm.h"
+#include "intel_atomic.h"
+#include "intel_display.h"
+#include "intel_display_trace.h"
+#include "intel_mchbar_regs.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
+#include "vlv_sideband.h"
+
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+};
+
+struct cxsr_latency {
+ bool is_desktop : 1;
+ bool is_ddr3 : 1;
+ u16 fsb_freq;
+ u16 mem_freq;
+ u16 display_sr;
+ u16 display_hpll_disable;
+ u16 cursor_sr;
+ u16 cursor_hpll_disable;
+};
+
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
+
+static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
+ bool is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
+
+static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ vlv_punit_get(dev_priv);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if (enable)
+ val &= ~FORCE_DDR_HIGH_FREQ;
+ else
+ val |= FORCE_DDR_HIGH_FREQ;
+ val &= ~FORCE_DDR_LOW_FREQ;
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+ drm_err(&dev_priv->drm,
+ "timed out waiting for Punit DDR DVFS request\n");
+
+ vlv_punit_put(dev_priv);
+}
+
+static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+ vlv_punit_get(dev_priv);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ if (enable)
+ val |= DSP_MAXFIFO_PM5_ENABLE;
+ else
+ val &= ~DSP_MAXFIFO_PM5_ENABLE;
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+
+ vlv_punit_put(dev_priv);
+}
+
+#define FW_WM(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
+
+static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+{
+ bool was_enabled;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
+ } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
+ } else if (IS_PINEVIEW(dev_priv)) {
+ val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
+ if (enable)
+ val |= PINEVIEW_SELF_REFRESH_EN;
+ else
+ val &= ~PINEVIEW_SELF_REFRESH_EN;
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
+ was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+ _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
+ intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
+ } else if (IS_I915GM(dev_priv)) {
+ /*
+ * FIXME can't find a bit like this for 915G, and
+ * yet it does have the related watermark in
+ * FW_BLC_SELF. What's going on?
+ */
+ was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
+ val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+ _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+ intel_uncore_write(&dev_priv->uncore, INSTPM, val);
+ intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
+ } else {
+ return false;
+ }
+
+ trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
+
+ drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ str_enabled_disabled(enable),
+ str_enabled_disabled(was_enabled));
+
+ return was_enabled;
+}
+
+/**
+ * intel_set_memory_cxsr - Configure CxSR state
+ * @dev_priv: i915 device
+ * @enable: Allow vs. disallow CxSR
+ *
+ * Allow or disallow the system to enter a special CxSR
+ * (C-state self refresh) state. What typically happens in CxSR mode
+ * is that several display FIFOs may get combined into a single larger
+ * FIFO for a particular plane (so called max FIFO mode) to allow the
+ * system to defer memory fetches longer, and the memory will enter
+ * self refresh.
+ *
+ * Note that enabling CxSR does not guarantee that the system enters
+ * this special mode, nor does it guarantee that the system stays
+ * in that mode once entered. So this just allows/disallows the system
+ * to autonomously utilize the CxSR mode. Other factors such as core
+ * C-states will affect when/if the system actually enters/exits the
+ * CxSR mode.
+ *
+ * Note that on VLV/CHV this actually only controls the max FIFO mode,
+ * and the system is free to enter/exit memory self refresh at any time
+ * even when the use of CxSR has been disallowed.
+ *
+ * While the system is actually in the CxSR/max FIFO mode, some plane
+ * control registers will not get latched on vblank. Thus in order to
+ * guarantee the system will respond to changes in the plane registers
+ * we must always disallow CxSR prior to making changes to those registers.
+ * Unfortunately the system will re-evaluate the CxSR conditions at
+ * frame start which happens after vblank start (which is when the plane
+ * registers would get latched), so we can't proceed with the plane update
+ * during the same frame where we disallowed CxSR.
+ *
+ * Certain platforms also have a deeper HPLL SR mode. Fortunately the
+ * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
+ * the hardware w.r.t. HPLL SR when writing to plane registers.
+ * Disallowing just CxSR is sufficient.
+ */
+bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+{
+ bool ret;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(dev_priv, enable);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.wm.vlv.cxsr = enable;
+ else if (IS_G4X(dev_priv))
+ dev_priv->display.wm.g4x.cxsr = enable;
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+
+ return ret;
+}
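+
+/*
+ * Illustrative caller sketch (assumed, per the constraint described
+ * above): CxSR must be disallowed and a vblank must pass before the
+ * plane registers may be touched:
+ *
+ *   intel_set_memory_cxsr(i915, false);
+ *   intel_crtc_wait_for_next_vblank(crtc);
+ *   ... write plane registers ...
+ */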
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int pessimal_latency_ns = 5000;
+
+#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
+ ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
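+
+/*
+ * The 9 bit FIFO start offsets are split across two registers: e.g.
+ * for sprite0 on pipe A, bits 7:0 live in DSPARB[7:0] and bit 8 in
+ * DSPARB2[0], so VLV_FIFO_START(dsparb, dsparb2, 0, 0) reassembles
+ * the full value.
+ */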
+
+static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
+ enum pipe pipe = crtc->pipe;
+ int sprite0_start, sprite1_start;
+ u32 dsparb, dsparb2, dsparb3;
+
+ switch (pipe) {
+ case PIPE_A:
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
+ break;
+ case PIPE_B:
+ dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
+ sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
+ break;
+ case PIPE_C:
+ dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
+ sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
+ sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
+ break;
+ default:
+ MISSING_CASE(pipe);
+ return;
+ }
+
+ fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
+ fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
+ fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
+ fifo_state->plane[PLANE_CURSOR] = 63;
+}
+
+static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (i9xx_plane == PLANE_B)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (i9xx_plane == PLANE_B)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
+
+ return size;
+}
+
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ dsparb, plane_name(i9xx_plane), size);
+
+ return size;
+}
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pnv_display_wm = {
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_display_hplloff_wm = {
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_cursor_wm = {
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i945_wm_info = {
+ .fifo_size = I945_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i915_wm_info = {
+ .fifo_size = I915_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i830_a_wm_info = {
+ .fifo_size = I855GM_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i830_bc_wm_info = {
+ .fifo_size = I855GM_FIFO_SIZE,
+ .max_wm = I915_MAX_WM / 2,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i845_wm_info = {
+ .fifo_size = I830_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+/**
+ * intel_wm_method1 - Method 1 / "small buffer" watermark formula
+ * @pixel_rate: Pipe pixel rate in kHz
+ * @cpp: Plane bytes per pixel
+ * @latency: Memory wakeup latency in 0.1us units
+ *
+ * Compute the watermark using the method 1 or "small buffer"
+ * formula. The caller may additionally add extra cachelines
+ * to account for TLB misses and clock crossings.
+ *
+ * This method is concerned with the short term drain rate
+ * of the FIFO, ie. it does not account for blanking periods
+ * which would effectively reduce the average drain rate across
+ * a longer period. The name "small" refers to the fact the
+ * FIFO is relatively small compared to the amount of data
+ * fetched.
+ *
+ * The FIFO level vs. time graph might look something like:
+ *
+ * |\ |\
+ * | \ | \
+ * __---__---__ (- plane active, _ blanking)
+ * -> time
+ *
+ * or perhaps like this:
+ *
+ * |\|\ |\|\
+ * __----__----__ (- plane active, _ blanking)
+ * -> time
+ *
+ * Returns:
+ * The watermark in bytes
+ */
+static unsigned int intel_wm_method1(unsigned int pixel_rate,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ u64 ret;
+
+ ret = mul_u32_u32(pixel_rate, cpp * latency);
+ ret = DIV_ROUND_UP_ULL(ret, 10000);
+
+ return ret;
+}
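+
+/*
+ * Worked example (assumed values): a 148500 kHz pixel clock, 4 bytes
+ * per pixel and a 5 us wakeup latency (50 in 0.1us units) give
+ *
+ *   148500 * 4 * 50 / 10000 = 2970 bytes
+ *
+ * i.e. the FIFO must hold ~2970 bytes of pixel data to ride out the
+ * wakeup latency at the short term drain rate.
+ */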
+
+/**
+ * intel_wm_method2 - Method 2 / "large buffer" watermark formula
+ * @pixel_rate: Pipe pixel rate in kHz
+ * @htotal: Pipe horizontal total
+ * @width: Plane width in pixels
+ * @cpp: Plane bytes per pixel
+ * @latency: Memory wakeup latency in 0.1us units
+ *
+ * Compute the watermark using the method 2 or "large buffer"
+ * formula. The caller may additionally add extra cachelines
+ * to account for TLB misses and clock crossings.
+ *
+ * This method is concerned with the long term drain rate
+ * of the FIFO, ie. it does account for blanking periods
+ * which effectively reduce the average drain rate across
+ * a longer period. The name "large" refers to the fact the
+ * FIFO is relatively large compared to the amount of data
+ * fetched.
+ *
+ * The FIFO level vs. time graph might look something like:
+ *
+ * |\___ |\___
+ * | \___ | \___
+ * | \ | \
+ * __ --__--__--__--__--__--__ (- plane active, _ blanking)
+ * -> time
+ *
+ * Returns:
+ * The watermark in bytes
+ */
+static unsigned int intel_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ /*
+ * FIXME remove once all users are computing
+ * watermarks in the correct place.
+ */
+ if (WARN_ON_ONCE(htotal == 0))
+ htotal = 1;
+
+ ret = (latency * pixel_rate) / (htotal * 10000);
+ ret = (ret + 1) * width * cpp;
+
+ return ret;
+}
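+
+/*
+ * Worked example (assumed values): a 148500 kHz pixel clock, htotal
+ * of 2200, a 1920 pixel wide 4 bpp plane and a 12 us latency (120 in
+ * 0.1us units):
+ *
+ *   lines = 120 * 148500 / (2200 * 10000) = 0 (integer math)
+ *   wm    = (0 + 1) * 1920 * 4            = 7680 bytes
+ *
+ * i.e. at least one full line of data must fit in the FIFO.
+ */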
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @pixel_rate: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the FIFO buffer
+ * @cpp: bytes per pixel
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO line sized chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned int intel_calculate_wm(int pixel_rate,
+ const struct intel_watermark_params *wm,
+ int fifo_size, int cpp,
+ unsigned int latency_ns)
+{
+ int entries, wm_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * clocks go from a few thousand to several hundred thousand.
+ * latency is usually a few thousand
+ */
+ entries = intel_wm_method1(pixel_rate, cpp,
+ latency_ns / 100);
+ entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
+ wm->guard_size;
+ DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
+
+ wm_size = fifo_size - entries;
+ DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+
+ /*
+ * Bspec seems to indicate that the value shouldn't be lower than
+ * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
+ * Let's go for 8 which is the burst size since certain platforms
+ * already use a hardcoded 8 (which is what the spec says should be
+ * done).
+ */
+ if (wm_size <= 8)
+ wm_size = 8;
+
+ return wm_size;
+}
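+
+/*
+ * Worked example (assumed values): with the method 1 result above
+ * (2970 bytes at 148500 kHz, 4 bpp, 5000 ns), a 64 byte cacheline,
+ * a guard size of 2 and a hypothetical 96 entry FIFO:
+ *
+ *   entries = DIV_ROUND_UP(2970, 64) + 2 = 49
+ *   wm_size = 96 - 49                    = 47
+ *
+ * so the plane starts refilling once the FIFO drains to 47 entries.
+ */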
+
+static bool is_disabling(int old, int new, int threshold)
+{
+ return old >= threshold && new < threshold;
+}
+
+static bool is_enabling(int old, int new, int threshold)
+{
+ return old < threshold && new >= threshold;
+}
+
+static bool intel_crtc_active(struct intel_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ *
+ * We can ditch the adjusted_mode.crtc_clock check as soon
+ * as Haswell has gained clock readout/fastboot support.
+ *
+ * We can ditch the crtc->primary->state->fb check as soon as we can
+ * properly reconstruct framebuffers.
+ *
+ * FIXME: The intel_crtc->active here should be switched to
+ * crtc->state->active once we have proper CRTC states wired up
+ * for atomic.
+ */
+ return crtc && crtc->active && crtc->base.primary->state->fb &&
+ crtc->config->hw.adjusted_mode.crtc_clock;
+}
+
+static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc, *enabled = NULL;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ if (intel_crtc_active(crtc)) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pnv_update_wm(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned int wm;
+
+ latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq);
+ if (!latency) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown FSB/MEM found, disable CxSR\n");
+ intel_set_memory_cxsr(dev_priv, false);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev_priv);
+ if (crtc) {
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int pixel_rate = crtc->config->pixel_rate;
+ int cpp = fb->format->cpp[0];
+
+ /* Display SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
+ pnv_display_wm.fifo_size,
+ cpp, latency->display_sr);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= FW_WM(wm, SR);
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
+ pnv_display_wm.fifo_size,
+ 4, latency->cursor_sr);
+ intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
+ FW_WM(wm, CURSOR_SR));
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
+ cpp, latency->display_hpll_disable);
+ intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
+ pnv_display_hplloff_wm.fifo_size,
+ 4, latency->cursor_hpll_disable);
+ reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= FW_WM(wm, HPLL_CURSOR);
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
+ drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
+
+ intel_set_memory_cxsr(dev_priv, true);
+ } else {
+ intel_set_memory_cxsr(dev_priv, false);
+ }
+}
+
+/*
+ * Documentation says:
+ * "If the line size is small, the TLB fetches can get in the way of the
+ * data fetches, causing some lag in the pixel data return which is not
+ * accounted for in the above formulas. The following adjustment only
+ * needs to be applied if eight whole lines fit in the buffer at once.
+ * The WM is adjusted upwards by the difference between the FIFO size
+ * and the size of 8 whole lines. This adjustment is always performed
+ * in the actual pixel depth regardless of whether FBC is enabled or not."
+ */
+static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
+{
+ int tlb_miss = fifo_size * 64 - width * cpp * 8;
+
+ return max(0, tlb_miss);
+}
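+
+/*
+ * Example (assumed values): with a 511 cacheline FIFO (32704 bytes)
+ * and a 960 pixel wide 4 bpp plane, eight whole lines (30720 bytes)
+ * fit, so the watermark is bumped by 32704 - 30720 = 1984 bytes. For
+ * a 1920 pixel wide plane eight lines (61440 bytes) do not fit,
+ * tlb_miss goes negative and no adjustment is applied.
+ */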
+
+static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+ const struct g4x_wm_values *wm)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
+
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
+
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
+}
+
+#define FW_WM_VLV(value, plane) \
+ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
+
+static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
+ const struct vlv_wm_values *wm)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
+
+ intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ }
+
+ /*
+ * Zero the (unused) WM1 watermarks, and also clear all the
+ * high order bits so that there are no out of bounds values
+ * present in the registers during the reprogramming.
+ */
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
+ intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
+
+ intel_uncore_write(&dev_priv->uncore, DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW2,
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW3,
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
+ intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ } else {
+ intel_uncore_write(&dev_priv->uncore, DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_uncore_write(&dev_priv->uncore, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ }
+
+ intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
+}
+
+#undef FW_WM_VLV
+
+static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+{
+ /* all latencies in usec */
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+
+ dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
+}
+
+static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
+{
+ /*
+ * DSPCNTR[13] supposedly controls whether the
+ * primary plane can use the FIFO space otherwise
+ * reserved for the sprite plane. It's not 100% clear
+ * what the actual FIFO size is, but it looks like we
+ * can happily set both primary and sprite watermarks
+ * up to 127 cachelines. So that would seem to mean
+ * that either DSPCNTR[13] doesn't do anything, or that
+ * the total FIFO is >= 256 cachelines in size. Either
+ * way, we don't seem to have to worry about this
+ * repartitioning as the maximum watermark value the
+ * register can hold for each plane is lower than the
+ * minimum FIFO size.
+ */
+ switch (plane_id) {
+ case PLANE_CURSOR:
+ return 63;
+ case PLANE_PRIMARY:
+ return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
+ case PLANE_SPRITE0:
+ return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
+ default:
+ MISSING_CASE(plane_id);
+ return 0;
+ }
+}
+
+static int g4x_fbc_fifo_size(int level)
+{
+ switch (level) {
+ case G4X_WM_LEVEL_SR:
+ return 7;
+ case G4X_WM_LEVEL_HPLL:
+ return 15;
+ default:
+ MISSING_CASE(level);
+ return 0;
+ }
+}
+
+static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_display_mode *pipe_mode =
+ &crtc_state->hw.pipe_mode;
+ unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
+ unsigned int pixel_rate, htotal, cpp, width, wm;
+
+ if (latency == 0)
+ return USHRT_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ /*
+ * WaUse32BppForSRWM:ctg,elk
+ *
+ * The spec fails to list this restriction for the
+ * HPLL watermark, which seems a little strange.
+ * Let's use 32bpp for the HPLL watermark as well.
+ */
+ if (plane->id == PLANE_PRIMARY &&
+ level != G4X_WM_LEVEL_NORMAL)
+ cpp = max(cpp, 4u);
+
+ pixel_rate = crtc_state->pixel_rate;
+ htotal = pipe_mode->crtc_htotal;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ if (plane->id == PLANE_CURSOR) {
+ wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
+ } else if (plane->id == PLANE_PRIMARY &&
+ level == G4X_WM_LEVEL_NORMAL) {
+ wm = intel_wm_method1(pixel_rate, cpp, latency);
+ } else {
+ unsigned int small, large;
+
+ small = intel_wm_method1(pixel_rate, cpp, latency);
+ large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
+
+ wm = min(small, large);
+ }
+
+ wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
+ width, cpp);
+
+ wm = DIV_ROUND_UP(wm, 64) + 2;
+
+ return min_t(unsigned int, wm, USHRT_MAX);
+}
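+
+/*
+ * Worked example (assumed values): a 1920 pixel wide 4 bpp primary
+ * plane at the 12 us SR level on a 148500 kHz / 2200 htotal pipe:
+ * method 1 gives 7128 bytes, method 2 gives 7680 bytes, so the
+ * smaller 7128 is used; eight whole lines do not fit in the 511
+ * cacheline SR FIFO, so no TLB miss adjustment applies, and the
+ * final watermark is DIV_ROUND_UP(7128, 64) + 2 = 114 cachelines.
+ */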
+
+static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
+ int level, enum plane_id plane_id, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ bool dirty = false;
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ dirty |= raw->plane[plane_id] != value;
+ raw->plane[plane_id] = value;
+ }
+
+ return dirty;
+}
+
+static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
+ int level, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ bool dirty = false;
+
+ /* NORMAL level doesn't have an FBC watermark */
+ level = max(level, G4X_WM_LEVEL_SR);
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ dirty |= raw->fbc != value;
+ raw->fbc = value;
+ }
+
+ return dirty;
+}
+
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 pri_val);
+
+static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum plane_id plane_id = plane->id;
+ bool dirty = false;
+ int level;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state)) {
+ dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
+ if (plane_id == PLANE_PRIMARY)
+ dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
+ goto out;
+ }
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+ int wm, max_wm;
+
+ wm = g4x_compute_wm(crtc_state, plane_state, level);
+ max_wm = g4x_plane_fifo_size(plane_id, level);
+
+ if (wm > max_wm)
+ break;
+
+ dirty |= raw->plane[plane_id] != wm;
+ raw->plane[plane_id] = wm;
+
+ if (plane_id != PLANE_PRIMARY ||
+ level == G4X_WM_LEVEL_NORMAL)
+ continue;
+
+ wm = ilk_compute_fbc_wm(crtc_state, plane_state,
+ raw->plane[plane_id]);
+ max_wm = g4x_fbc_fifo_size(level);
+
+ /*
+ * FBC wm is not mandatory as we
+ * can always just disable its use.
+ */
+ if (wm > max_wm)
+ wm = USHRT_MAX;
+
+ dirty |= raw->fbc != wm;
+ raw->fbc = wm;
+ }
+
+ /* mark watermarks as invalid */
+ dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
+
+ if (plane_id == PLANE_PRIMARY)
+ dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
+
+ out:
+ if (dirty) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
+ plane->base.name,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
+
+ if (plane_id == PLANE_PRIMARY)
+ drm_dbg_kms(&dev_priv->drm,
+ "FBC watermarks: SR=%d, HPLL=%d\n",
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
+ }
+
+ return dirty;
+}
+
+static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id, int level)
+{
+ const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
+}
+
+static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ int level)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (level >= dev_priv->display.wm.num_levels)
+ return false;
+
+ return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
+ g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
+ g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
+}
+
+/* mark all levels starting from 'level' as invalid */
+static void g4x_invalidate_wms(struct intel_crtc *crtc,
+ struct g4x_wm_state *wm_state, int level)
+{
+ if (level <= G4X_WM_LEVEL_NORMAL) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm.plane[plane_id] = USHRT_MAX;
+ }
+
+ if (level <= G4X_WM_LEVEL_SR) {
+ wm_state->cxsr = false;
+ wm_state->sr.cursor = USHRT_MAX;
+ wm_state->sr.plane = USHRT_MAX;
+ wm_state->sr.fbc = USHRT_MAX;
+ }
+
+ if (level <= G4X_WM_LEVEL_HPLL) {
+ wm_state->hpll_en = false;
+ wm_state->hpll.cursor = USHRT_MAX;
+ wm_state->hpll.plane = USHRT_MAX;
+ wm_state->hpll.fbc = USHRT_MAX;
+ }
+}
+
+static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
+ int level)
+{
+ if (level < G4X_WM_LEVEL_SR)
+ return false;
+
+ if (level >= G4X_WM_LEVEL_SR &&
+ wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
+ return false;
+
+ if (level >= G4X_WM_LEVEL_HPLL &&
+ wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
+ return false;
+
+ return true;
+}
+
+static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct g4x_pipe_wm *raw;
+ enum plane_id plane_id;
+ int level;
+
+ level = G4X_WM_LEVEL_NORMAL;
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm.plane[plane_id] = raw->plane[plane_id];
+
+ level = G4X_WM_LEVEL_SR;
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
+ wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
+ wm_state->sr.fbc = raw->fbc;
+
+ wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
+
+ level = G4X_WM_LEVEL_HPLL;
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
+ wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
+ wm_state->hpll.fbc = raw->fbc;
+
+ wm_state->hpll_en = wm_state->cxsr;
+
+ level++;
+
+ out:
+ if (level == G4X_WM_LEVEL_NORMAL)
+ return -EINVAL;
+
+ /* invalidate the higher levels */
+ g4x_invalidate_wms(crtc, wm_state, level);
+
+ /*
+ * Determine if the FBC watermark(s) can be used. If
+ * this isn't the case we prefer to disable the FBC
+ * watermark(s) rather than disable the SR/HPLL
+ * level(s) entirely. 'level-1' is the highest valid
+ * level here.
+ */
+ wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
+
+ return 0;
+}
+
+static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ unsigned int dirty = 0;
+ int i;
+
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
+ continue;
+
+ if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
+ dirty |= BIT(plane->id);
+ }
+
+ if (!dirty)
+ return 0;
+
+ return _g4x_compute_pipe_wm(crtc_state);
+}
+
+static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
+ const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
+ const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
+ enum plane_id plane_id;
+
+ if (!new_crtc_state->hw.active ||
+ intel_crtc_needs_modeset(new_crtc_state)) {
+ *intermediate = *optimal;
+
+ intermediate->cxsr = false;
+ intermediate->hpll_en = false;
+ goto out;
+ }
+
+ intermediate->cxsr = optimal->cxsr && active->cxsr &&
+ !new_crtc_state->disable_cxsr;
+ intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
+ !new_crtc_state->disable_cxsr;
+ intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ intermediate->wm.plane[plane_id] =
+ max(optimal->wm.plane[plane_id],
+ active->wm.plane[plane_id]);
+
+ drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
+ }
+
+ intermediate->sr.plane = max(optimal->sr.plane,
+ active->sr.plane);
+ intermediate->sr.cursor = max(optimal->sr.cursor,
+ active->sr.cursor);
+ intermediate->sr.fbc = max(optimal->sr.fbc,
+ active->sr.fbc);
+
+ intermediate->hpll.plane = max(optimal->hpll.plane,
+ active->hpll.plane);
+ intermediate->hpll.cursor = max(optimal->hpll.cursor,
+ active->hpll.cursor);
+ intermediate->hpll.fbc = max(optimal->hpll.fbc,
+ active->hpll.fbc);
+
+ drm_WARN_ON(&dev_priv->drm,
+ (intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
+ intermediate->cxsr);
+ drm_WARN_ON(&dev_priv->drm,
+ (intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
+ intermediate->hpll_en);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
+ intermediate->fbc_en && intermediate->cxsr);
+ drm_WARN_ON(&dev_priv->drm,
+ intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
+ intermediate->fbc_en && intermediate->hpll_en);
+
+out:
+ /*
+ * If our intermediate WM are identical to the final WM, then we can
+ * omit the post-vblank programming; only update if it's different.
+ */
+ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
+ new_crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
+static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+ struct g4x_wm_values *wm)
+{
+ struct intel_crtc *crtc;
+ int num_active_pipes = 0;
+
+ wm->cxsr = true;
+ wm->hpll_en = true;
+ wm->fbc_en = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
+
+ if (!crtc->active)
+ continue;
+
+ if (!wm_state->cxsr)
+ wm->cxsr = false;
+ if (!wm_state->hpll_en)
+ wm->hpll_en = false;
+ if (!wm_state->fbc_en)
+ wm->fbc_en = false;
+
+ num_active_pipes++;
+ }
+
+ if (num_active_pipes != 1) {
+ wm->cxsr = false;
+ wm->hpll_en = false;
+ wm->fbc_en = false;
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
+ enum pipe pipe = crtc->pipe;
+
+ wm->pipe[pipe] = wm_state->wm;
+ if (crtc->active && wm->cxsr)
+ wm->sr = wm_state->sr;
+ if (crtc->active && wm->hpll_en)
+ wm->hpll = wm_state->hpll;
+ }
+}
+
+static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values new_wm = {};
+
+ g4x_merge_wm(dev_priv, &new_wm);
+
+ if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
+ return;
+
+ if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, false);
+
+ g4x_write_wm_values(dev_priv, &new_wm);
+
+ if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, true);
+
+ *old_wm = new_wm;
+}
+
+static void g4x_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
+ g4x_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void g4x_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ g4x_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+/* latency must be in 0.1us units. */
+static unsigned int vlv_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ ret = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, latency);
+ ret = DIV_ROUND_UP(ret, 64);
+
+ return ret;
+}
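+
+/*
+ * Worked example (assumed values): a 148500 kHz pixel clock, htotal
+ * 2200, a 1920 pixel wide 4 bpp plane and the 3 us PM2 latency (30
+ * in 0.1us units): method 2 yields (0 + 1) * 1920 * 4 = 7680 bytes,
+ * and DIV_ROUND_UP(7680, 64) = 120 cachelines.
+ */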
+
+static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
+{
+ /* all latencies in usec */
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
+ }
+}
+
+static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_display_mode *pipe_mode =
+ &crtc_state->hw.pipe_mode;
+ unsigned int pixel_rate, htotal, cpp, width, wm;
+
+ if (dev_priv->display.wm.pri_latency[level] == 0)
+ return USHRT_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+ pixel_rate = crtc_state->pixel_rate;
+ htotal = pipe_mode->crtc_htotal;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ if (plane->id == PLANE_CURSOR) {
+ /*
+ * FIXME the formula gives values that are
+ * too big for the cursor FIFO, and hence we
+ * would never be able to use cursors. For
+ * now just hardcode the watermark.
+ */
+ wm = 63;
+ } else {
+ wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
+ dev_priv->display.wm.pri_latency[level] * 10);
+ }
+
+ return min_t(unsigned int, wm, USHRT_MAX);
+}
+
+static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
+{
+ return (active_planes & (BIT(PLANE_SPRITE0) |
+ BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
+}
+
+static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
+ struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ int num_active_planes = hweight8(active_planes);
+ const int fifo_size = 511;
+ int fifo_extra, fifo_left = fifo_size;
+ int sprite0_fifo_extra = 0;
+ unsigned int total_rate;
+ enum plane_id plane_id;
+
+ /*
+ * When enabling sprite0 after sprite1 has already been enabled
+ * we tend to get an underrun unless sprite0 already has some
+ * FIFO space allocated. Hence we always allocate at least one
+ * cacheline for sprite0 whenever sprite1 is enabled.
+ *
+ * All other plane enable sequences appear immune to this problem.
+ */
+ if (vlv_need_sprite0_fifo_workaround(active_planes))
+ sprite0_fifo_extra = 1;
+
+ total_rate = raw->plane[PLANE_PRIMARY] +
+ raw->plane[PLANE_SPRITE0] +
+ raw->plane[PLANE_SPRITE1] +
+ sprite0_fifo_extra;
+
+ if (total_rate > fifo_size)
+ return -EINVAL;
+
+ if (total_rate == 0)
+ total_rate = 1;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ unsigned int rate;
+
+ if ((active_planes & BIT(plane_id)) == 0) {
+ fifo_state->plane[plane_id] = 0;
+ continue;
+ }
+
+ rate = raw->plane[plane_id];
+ fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
+ fifo_left -= fifo_state->plane[plane_id];
+ }
+
+ fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
+ fifo_left -= sprite0_fifo_extra;
+
+ fifo_state->plane[PLANE_CURSOR] = 63;
+
+ fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
+
+ /* spread the remainder evenly */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ int plane_extra;
+
+ if (fifo_left == 0)
+ break;
+
+ if ((active_planes & BIT(plane_id)) == 0)
+ continue;
+
+ plane_extra = min(fifo_extra, fifo_left);
+ fifo_state->plane[plane_id] += plane_extra;
+ fifo_left -= plane_extra;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
+
+ /* give it all to the first plane if none are active */
+ if (active_planes == 0) {
+ drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
+ fifo_state->plane[PLANE_PRIMARY] = fifo_left;
+ }
+
+ return 0;
+}
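+
+/*
+ * Worked example of the proportional split above: with PM2 raw
+ * watermarks of primary=100, sprite0=50, sprite1 disabled and no
+ * sprite0 workaround, total_rate = 150, so the primary gets
+ * 511 * 100 / 150 = 340 cachelines and sprite0 gets
+ * 511 * 50 / 150 = 170, leaving fifo_left = 1 to be spread over the
+ * active planes (the primary ends up with 341).
+ */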
+
+/* mark all levels starting from 'level' as invalid */
+static void vlv_invalidate_wms(struct intel_crtc *crtc,
+ struct vlv_wm_state *wm_state, int level)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm[level].plane[plane_id] = USHRT_MAX;
+
+ wm_state->sr[level].cursor = USHRT_MAX;
+ wm_state->sr[level].plane = USHRT_MAX;
+ }
+}
+
+static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
+{
+ if (wm > fifo_size)
+ return USHRT_MAX;
+ else
+ return fifo_size - wm;
+}
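+
+/*
+ * The hardware effectively wants the watermark expressed as a FIFO
+ * fill level (fifo_size - entries needed) rather than as the number
+ * of entries itself, hence the inversion above; a watermark that
+ * doesn't fit in the plane's FIFO allocation becomes USHRT_MAX,
+ * i.e. invalid.
+ */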
+
+/*
+ * Starting from 'level' set all higher
+ * levels to 'value' in the "raw" watermarks.
+ */
+static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
+ int level, enum plane_id plane_id, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ bool dirty = false;
+
+ for (; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+
+ dirty |= raw->plane[plane_id] != value;
+ raw->plane[plane_id] = value;
+ }
+
+ return dirty;
+}
+
+static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum plane_id plane_id = plane->id;
+ int level;
+ bool dirty = false;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state)) {
+ dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
+ goto out;
+ }
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+ int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
+ int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
+
+ if (wm > max_wm)
+ break;
+
+ dirty |= raw->plane[plane_id] != wm;
+ raw->plane[plane_id] = wm;
+ }
+
+ /* mark all higher levels as invalid */
+ dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
+
+out:
+ if (dirty)
+ drm_dbg_kms(&dev_priv->drm,
+ "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
+ plane->base.name,
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
+
+ return dirty;
+}
+
+static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id, int level)
+{
+ const struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+
+ return raw->plane[plane_id] <= fifo_state->plane[plane_id];
+}
+
+static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
+{
+ return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
+}
+
+static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ int num_active_planes = hweight8(active_planes);
+ enum plane_id plane_id;
+ int level;
+
+ /* initially allow all levels */
+ wm_state->num_levels = dev_priv->display.wm.num_levels;
+ /*
+ * Note that enabling cxsr with no primary/sprite planes
+ * enabled can wedge the pipe. Hence we only allow cxsr
+ * with exactly one enabled primary/sprite plane.
+ */
+ wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
+
+ for (level = 0; level < wm_state->num_levels; level++) {
+ const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+ const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
+
+ if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
+ break;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ wm_state->wm[level].plane[plane_id] =
+ vlv_invert_wm_value(raw->plane[plane_id],
+ fifo_state->plane[plane_id]);
+ }
+
+ wm_state->sr[level].plane =
+ vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
+ raw->plane[PLANE_SPRITE0],
+ raw->plane[PLANE_SPRITE1]),
+ sr_fifo_size);
+
+ wm_state->sr[level].cursor =
+ vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
+ 63);
+ }
+
+ if (level == 0)
+ return -EINVAL;
+
+ /* limit to only levels we can actually handle */
+ wm_state->num_levels = level;
+
+ /* invalidate the higher levels */
+ vlv_invalidate_wms(crtc, wm_state, level);
+
+ return 0;
+}
+
+static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ unsigned int dirty = 0;
+ int i;
+
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
+ continue;
+
+ if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
+ dirty |= BIT(plane->id);
+ }
+
+ /*
+ * DSPARB registers may have been reset due to the
+ * power well being turned off. Make sure we restore
+ * them to a consistent state even if no primary/sprite
+ * planes are initially active. We also force a FIFO
+ * recomputation so that we are sure to sanitize the
+ * FIFO setting we took over from the BIOS even if there
+ * are no active planes on the crtc.
+ */
+ if (intel_crtc_needs_modeset(crtc_state))
+ dirty = ~0;
+
+ if (!dirty)
+ return 0;
+
+ /* cursor changes don't warrant a FIFO recompute */
+ if (dirty & ~BIT(PLANE_CURSOR)) {
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct vlv_fifo_state *old_fifo_state =
+ &old_crtc_state->wm.vlv.fifo_state;
+ const struct vlv_fifo_state *new_fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ int ret;
+
+ ret = vlv_compute_fifo(crtc_state);
+ if (ret)
+ return ret;
+
+ if (intel_crtc_needs_modeset(crtc_state) ||
+ memcmp(old_fifo_state, new_fifo_state,
+ sizeof(*new_fifo_state)) != 0)
+ crtc_state->fifo_changed = true;
+ }
+
+ return _vlv_compute_pipe_wm(crtc_state);
+}
+
+#define VLV_FIFO(plane, value) \
+ (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
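+
+/*
+ * e.g. VLV_FIFO(SPRITEB, sprite1_start) expands to
+ * ((sprite1_start << DSPARB_SPRITEB_SHIFT_VLV) & DSPARB_SPRITEB_MASK_VLV),
+ * placing the FIFO split point into that plane's register field.
+ */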
+
+static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ int sprite0_start, sprite1_start, fifo_size;
+ u32 dsparb, dsparb2, dsparb3;
+
+ if (!crtc_state->fifo_changed)
+ return;
+
+ sprite0_start = fifo_state->plane[PLANE_PRIMARY];
+ sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
+ fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
+
+ drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
+
+ trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
+
+ /*
+ * uncore.lock serves a double purpose here. It allows us to
+ * use the less expensive intel_uncore_*_fw() accessors, and
+ * it protects the DSPARB registers from getting clobbered by
+ * parallel updates from multiple pipes.
+ *
+ * intel_pipe_update_start() has already disabled interrupts
+ * for us, so a plain spin_lock() is sufficient here.
+ */
+ spin_lock(&uncore->lock);
+
+ switch (crtc->pipe) {
+ case PIPE_A:
+ dsparb = intel_uncore_read_fw(uncore, DSPARB);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+
+ dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
+ VLV_FIFO(SPRITEB, 0xff));
+ dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
+ VLV_FIFO(SPRITEB, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
+ VLV_FIFO(SPRITEB_HI, 0x1));
+ dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
+
+ intel_uncore_write_fw(uncore, DSPARB, dsparb);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ break;
+ case PIPE_B:
+ dsparb = intel_uncore_read_fw(uncore, DSPARB);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+
+ dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
+ VLV_FIFO(SPRITED, 0xff));
+ dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
+ VLV_FIFO(SPRITED, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
+ VLV_FIFO(SPRITED_HI, 0xff));
+ dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
+
+ intel_uncore_write_fw(uncore, DSPARB, dsparb);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ break;
+ case PIPE_C:
+ dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+
+ dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
+ VLV_FIFO(SPRITEF, 0xff));
+ dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
+ VLV_FIFO(SPRITEF, sprite1_start));
+
+ dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
+ VLV_FIFO(SPRITEF_HI, 0xff));
+ dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
+ VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
+
+ intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ break;
+ default:
+ break;
+ }
+
+ intel_uncore_posting_read_fw(uncore, DSPARB);
+
+ spin_unlock(&uncore->lock);
+}
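+
+/*
+ * Note the sprite FIFO start values above are 9 bits wide (the FIFO
+ * holds 511 entries): the low 8 bits live in DSPARB/DSPARB3 and the
+ * 9th bit (hence the ">> 8") goes into the corresponding _HI field
+ * in DSPARB2.
+ */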
+
+#undef VLV_FIFO
+
+static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
+ const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
+ const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
+ int level;
+
+ if (!new_crtc_state->hw.active ||
+ intel_crtc_needs_modeset(new_crtc_state)) {
+ *intermediate = *optimal;
+
+ intermediate->cxsr = false;
+ goto out;
+ }
+
+ intermediate->num_levels = min(optimal->num_levels, active->num_levels);
+ intermediate->cxsr = optimal->cxsr && active->cxsr &&
+ !new_crtc_state->disable_cxsr;
+
+ for (level = 0; level < intermediate->num_levels; level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ intermediate->wm[level].plane[plane_id] =
+ min(optimal->wm[level].plane[plane_id],
+ active->wm[level].plane[plane_id]);
+ }
+
+ intermediate->sr[level].plane = min(optimal->sr[level].plane,
+ active->sr[level].plane);
+ intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
+ active->sr[level].cursor);
+ }
+
+ vlv_invalidate_wms(crtc, intermediate, level);
+
+out:
+ /*
+ * If our intermediate WMs are identical to the final WMs, then we can
+ * omit the post-vblank programming; only update if it's different.
+ */
+ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
+ new_crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
+static void vlv_merge_wm(struct drm_i915_private *dev_priv,
+ struct vlv_wm_values *wm)
+{
+ struct intel_crtc *crtc;
+ int num_active_pipes = 0;
+
+ wm->level = dev_priv->display.wm.num_levels - 1;
+ wm->cxsr = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
+
+ if (!crtc->active)
+ continue;
+
+ if (!wm_state->cxsr)
+ wm->cxsr = false;
+
+ num_active_pipes++;
+ wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
+ }
+
+ if (num_active_pipes != 1)
+ wm->cxsr = false;
+
+ if (num_active_pipes > 1)
+ wm->level = VLV_WM_LEVEL_PM2;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
+ enum pipe pipe = crtc->pipe;
+
+ wm->pipe[pipe] = wm_state->wm[wm->level];
+ if (crtc->active && wm->cxsr)
+ wm->sr = wm_state->sr[wm->level];
+
+ wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
+ }
+}
+
+static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values new_wm = {};
+
+ vlv_merge_wm(dev_priv, &new_wm);
+
+ if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
+ return;
+
+ if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
+ chv_set_memory_dvfs(dev_priv, false);
+
+ if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
+ chv_set_memory_pm5(dev_priv, false);
+
+ if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, false);
+
+ vlv_write_wm_values(dev_priv, &new_wm);
+
+ if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, true);
+
+ if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
+ chv_set_memory_pm5(dev_priv, true);
+
+ if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
+ chv_set_memory_dvfs(dev_priv, true);
+
+ *old_wm = new_wm;
+}
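+
+/*
+ * The ordering above is deliberate: any power state (DDR DVFS, PM5,
+ * cxsr) that the new watermarks no longer allow is disabled before
+ * the new values are written, and any newly allowed state is enabled
+ * only afterwards, so the hardware never runs a power-saving state
+ * with watermarks that can't support it.
+ */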
+
+static void vlv_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
+ vlv_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void vlv_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ vlv_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void i965_update_wm(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+ bool cxsr_enabled;
+
+ /* Calc sr entries for one plane configs */
+ crtc = single_enabled_crtc(dev_priv);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ const struct drm_display_mode *pipe_mode =
+ &crtc->config->hw.pipe_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int pixel_rate = crtc->config->pixel_rate;
+ int htotal = pipe_mode->crtc_htotal;
+ int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
+ int cpp = fb->format->cpp[0];
+ int entries;
+
+ entries = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, sr_latency_ns / 100);
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = intel_wm_method2(pixel_rate, htotal,
+ crtc->base.cursor->state->crtc_w, 4,
+ sr_latency_ns / 100);
+ entries = DIV_ROUND_UP(entries,
+ i965_cursor_wm_info.cacheline_size) +
+ i965_cursor_wm_info.guard_size;
+
+ cursor_sr = i965_cursor_wm_info.fifo_size - entries;
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ cxsr_enabled = true;
+ } else {
+ cxsr_enabled = false;
+ /* Turn off self refresh if both pipes are enabled */
+ intel_set_memory_cxsr(dev_priv, false);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
+ /* update cursor SR watermark */
+ intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
+}
+
+#undef FW_WM
+
+static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
+ enum i9xx_plane_id i9xx_plane)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&i915->drm, plane) {
+ if (plane->id == PLANE_PRIMARY &&
+ plane->i9xx_plane == i9xx_plane)
+ return intel_crtc_for_pipe(i915, plane->pipe);
+ }
+
+ return NULL;
+}
+
+static void i9xx_update_wm(struct drm_i915_private *dev_priv)
+{
+ const struct intel_watermark_params *wm_info;
+ u32 fwater_lo;
+ u32 fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct intel_crtc *crtc;
+
+ if (IS_I945GM(dev_priv))
+ wm_info = &i945_wm_info;
+ else if (DISPLAY_VER(dev_priv) != 2)
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i830_a_wm_info;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ else
+ fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
+ crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
+ if (intel_crtc_active(crtc)) {
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ cpp = 4;
+ else
+ cpp = fb->format->cpp[0];
+
+ planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ wm_info, fifo_size, cpp,
+ pessimal_latency_ns);
+ } else {
+ planea_wm = fifo_size - wm_info->guard_size;
+ if (planea_wm > (long)wm_info->max_wm)
+ planea_wm = wm_info->max_wm;
+ }
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ wm_info = &i830_bc_wm_info;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ else
+ fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
+ crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
+ if (intel_crtc_active(crtc)) {
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int cpp;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ cpp = 4;
+ else
+ cpp = fb->format->cpp[0];
+
+ planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ wm_info, fifo_size, cpp,
+ pessimal_latency_ns);
+ } else {
+ planeb_wm = fifo_size - wm_info->guard_size;
+ if (planeb_wm > (long)wm_info->max_wm)
+ planeb_wm = wm_info->max_wm;
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ crtc = single_enabled_crtc(dev_priv);
+ if (IS_I915GM(dev_priv) && crtc) {
+ struct drm_i915_gem_object *obj;
+
+ obj = intel_fb_obj(crtc->base.primary->state->fb);
+
+ /* self-refresh seems busted with untiled */
+ if (!i915_gem_object_is_tiled(obj))
+ crtc = NULL;
+ }
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ intel_set_memory_cxsr(dev_priv, false);
+
+ /* Calc sr entries for one plane configs */
+ if (HAS_FW_BLC(dev_priv) && crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ const struct drm_display_mode *pipe_mode =
+ &crtc->config->hw.pipe_mode;
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
+ int pixel_rate = crtc->config->pixel_rate;
+ int htotal = pipe_mode->crtc_htotal;
+ int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
+ int cpp;
+ int entries;
+
+ if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ cpp = 4;
+ else
+ cpp = fb->format->cpp[0];
+
+ entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
+ sr_latency_ns / 100);
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else
+ intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
+
+ if (crtc)
+ intel_set_memory_cxsr(dev_priv, true);
+}
+
+static void i845_update_wm(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+ u32 fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev_priv);
+ if (crtc == NULL)
+ return;
+
+ planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
+ &i845_wm_info,
+ i845_get_fifo_size(dev_priv, PLANE_A),
+ 4, pessimal_latency_ns);
+ fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+}
+
+/* latency must be in 0.1us units. */
+static unsigned int ilk_wm_method1(unsigned int pixel_rate,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ ret = intel_wm_method1(pixel_rate, cpp, latency);
+ ret = DIV_ROUND_UP(ret, 64) + 2;
+
+ return ret;
+}
+
+/* latency must be in 0.1us units. */
+static unsigned int ilk_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ ret = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, latency);
+ ret = DIV_ROUND_UP(ret, 64) + 2;
+
+ return ret;
+}
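+
+/*
+ * Both ilk methods above convert the byte estimate into 64-byte
+ * cachelines like the vlv variant, but additionally tack on 2 extra
+ * entries of headroom (the "+ 2").
+ */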
+
+static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
+{
+ /*
+ * Neither of these should be possible since this function shouldn't be
+ * called if the CRTC is off or the plane is invisible. But let's be
+ * extra paranoid to avoid a potential divide-by-zero if we screw up
+ * elsewhere in the driver.
+ */
+ if (WARN_ON(!cpp))
+ return 0;
+ if (WARN_ON(!horiz_pixels))
+ return 0;
+
+ return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
+}
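+
+/*
+ * One way to read the formula above: pri_val * 64 converts the
+ * primary watermark back from cachelines to bytes, and
+ * horiz_pixels * cpp is one scanline's worth of bytes, so the FBC
+ * watermark is roughly the number of scanlines the primary watermark
+ * covers, plus 2.
+ */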
+
+struct ilk_wm_maximums {
+ u16 pri;
+ u16 spr;
+ u16 cur;
+ u16 fbc;
+};
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 mem_value, bool is_lp)
+{
+ u32 method1, method2;
+ int cpp;
+
+ if (mem_value == 0)
+ return U32_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
+
+ if (!is_lp)
+ return method1;
+
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp, mem_value);
+
+ return min(method1, method2);
+}
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 mem_value)
+{
+ u32 method1, method2;
+ int cpp;
+
+ if (mem_value == 0)
+ return U32_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp, mem_value);
+ return min(method1, method2);
+}
+
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 mem_value)
+{
+ int cpp;
+
+ if (mem_value == 0)
+ return U32_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ return ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp, mem_value);
+}
+
+/* Only for WM_LP. */
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ u32 pri_val)
+{
+ int cpp;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ cpp = plane_state->hw.fb->format->cpp[0];
+
+ return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
+ cpp);
+}
+
+static unsigned int
+ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return 3072;
+ else if (DISPLAY_VER(dev_priv) >= 7)
+ return 768;
+ else
+ return 512;
+}
+
+static unsigned int
+ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ int level, bool is_sprite)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ /* BDW primary/sprite plane watermarks */
+ return level == 0 ? 255 : 2047;
+ else if (DISPLAY_VER(dev_priv) >= 7)
+ /* IVB/HSW primary/sprite plane watermarks */
+ return level == 0 ? 127 : 1023;
+ else if (!is_sprite)
+ /* ILK/SNB primary plane watermarks */
+ return level == 0 ? 127 : 511;
+ else
+ /* ILK/SNB sprite plane watermarks */
+ return level == 0 ? 63 : 255;
+}
+
+static unsigned int
+ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
+{
+ if (DISPLAY_VER(dev_priv) >= 7)
+ return level == 0 ? 63 : 255;
+ else
+ return level == 0 ? 31 : 63;
+}
+
+static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return 31;
+ else
+ return 15;
+}
+
+/* Calculate the maximum primary/sprite plane watermark */
+static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ bool is_sprite)
+{
+ unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
+
+ /* if sprites aren't enabled, sprites get nothing */
+ if (is_sprite && !config->sprites_enabled)
+ return 0;
+
+ /* HSW allows LP1+ watermarks even with multiple pipes */
+ if (level == 0 || config->num_pipes_active > 1) {
+ fifo_size /= INTEL_NUM_PIPES(dev_priv);
+
+ /*
+ * For some reason the non-self-refresh
+ * FIFO size is only half of the
+ * self-refresh FIFO size on ILK/SNB.
+ */
+ if (DISPLAY_VER(dev_priv) <= 6)
+ fifo_size /= 2;
+ }
+
+ if (config->sprites_enabled) {
+ /* level 0 is always calculated with 1:1 split */
+ if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
+ if (is_sprite)
+ fifo_size *= 5;
+ fifo_size /= 6;
+ } else {
+ fifo_size /= 2;
+ }
+ }
+
+ /* clamp to max that the registers can hold */
+ return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
+}
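+
+/*
+ * Example: on IVB (768 entry LP FIFO) with a single active pipe,
+ * sprites enabled and 5/6 partitioning at level 1+, the sprite gets
+ * 768 * 5 / 6 = 640 entries and the primary 768 / 6 = 128, both well
+ * within the 1023 entry register maximum for LP levels.
+ */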
+
+/* Calculate the maximum cursor plane watermark */
+static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
+ int level,
+ const struct intel_wm_config *config)
+{
+ /* HSW LP1+ watermarks w/ multiple pipes */
+ if (level > 0 && config->num_pipes_active > 1)
+ return 64;
+
+ /* otherwise just report max that registers can hold */
+ return ilk_cursor_wm_reg_max(dev_priv, level);
+}
+
+static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ struct ilk_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(dev_priv, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+}
+
+static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
+ int level,
+ struct ilk_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
+ max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
+ max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+}
+
+static bool ilk_validate_wm_level(int level,
+ const struct ilk_wm_maximums *max,
+ struct intel_wm_level *result)
+{
+ bool ret;
+
+ /* already determined to be invalid? */
+ if (!result->enable)
+ return false;
+
+ result->enable = result->pri_val <= max->pri &&
+ result->spr_val <= max->spr &&
+ result->cur_val <= max->cur;
+
+ ret = result->enable;
+
+ /*
+ * HACK until we can pre-compute everything,
+ * and thus fail gracefully if LP0 watermarks
+ * are exceeded...
+ */
+ if (level == 0 && !result->enable) {
+ if (result->pri_val > max->pri)
+ DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
+ level, result->pri_val, max->pri);
+ if (result->spr_val > max->spr)
+ DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
+ level, result->spr_val, max->spr);
+ if (result->cur_val > max->cur)
+ DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
+ level, result->cur_val, max->cur);
+
+ result->pri_val = min_t(u32, result->pri_val, max->pri);
+ result->spr_val = min_t(u32, result->spr_val, max->spr);
+ result->cur_val = min_t(u32, result->cur_val, max->cur);
+ result->enable = true;
+ }
+
+ return ret;
+}
+
+static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+ const struct intel_crtc *crtc,
+ int level,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *pristate,
+ const struct intel_plane_state *sprstate,
+ const struct intel_plane_state *curstate,
+ struct intel_wm_level *result)
+{
+ u16 pri_latency = dev_priv->display.wm.pri_latency[level];
+ u16 spr_latency = dev_priv->display.wm.spr_latency[level];
+ u16 cur_latency = dev_priv->display.wm.cur_latency[level];
+
+ /* WM1+ latency values are stored in 0.5us units; convert to 0.1us units */
+ if (level > 0) {
+ pri_latency *= 5;
+ spr_latency *= 5;
+ cur_latency *= 5;
+ }
+
+ if (pristate) {
+ result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
+ pri_latency, level);
+ result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
+ }
+
+ if (sprstate)
+ result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
+
+ if (curstate)
+ result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
+
+ result->enable = true;
+}
+
+static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u64 sskpd;
+
+ i915->display.wm.num_levels = 5;
+
+ sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
+
+ wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
+ if (wm[0] == 0)
+ wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
+ wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
+ wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
+ wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
+ wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
+}
+
+static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 sskpd;
+
+ i915->display.wm.num_levels = 4;
+
+ sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
+
+ wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
+ wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
+ wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
+ wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
+}
+
+static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 mltr;
+
+ i915->display.wm.num_levels = 3;
+
+ mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
+
+ /* ILK primary LP0 latency is 700 ns */
+ wm[0] = 7;
+ wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
+ wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
+}
+
+static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
+ u16 wm[5])
+{
+ /* ILK sprite LP0 latency is 1300 ns */
+ if (DISPLAY_VER(dev_priv) == 5)
+ wm[0] = 13;
+}
+
+static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
+ u16 wm[5])
+{
+ /* ILK cursor LP0 latency is 1300 ns */
+ if (DISPLAY_VER(dev_priv) == 5)
+ wm[0] = 13;
+}
+
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+ u16 wm[5], u16 min)
+{
+ int level;
+
+ if (wm[0] >= min)
+ return false;
+
+ wm[0] = max(wm[0], min);
+ for (level = 1; level < dev_priv->display.wm.num_levels; level++)
+ wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
+
+ return true;
+}
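+
+/*
+ * 'min' above is in 0.1us units to match WM0; the WM1+ values are
+ * stored in 0.5us units (see ilk_compute_wm_level()), hence the
+ * DIV_ROUND_UP(min, 5) when propagating the same floor to the
+ * higher levels.
+ */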
+
+static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
+{
+ bool changed;
+
+ /*
+ * The BIOS-provided WM memory latency values are often
+ * inadequate for high-resolution displays. Adjust them.
+ */
+ changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
+
+ if (!changed)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "WM latency values increased to avoid potential underruns\n");
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+}
+
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+{
+ /*
+ * On some SNB machines (Thinkpad X220 Tablet at least)
+ * LP3 usage can cause vblank interrupts to be lost.
+ * The DEIIR bit will go high but it looks like the CPU
+ * never gets interrupted.
+ *
+ * It's not clear whether other interrupt sources could
+ * be affected or if this is somehow limited to vblank
+ * interrupts only. To play it safe we disable LP3
+ * watermarks entirely.
+ */
+ if (dev_priv->display.wm.pri_latency[3] == 0 &&
+ dev_priv->display.wm.spr_latency[3] == 0 &&
+ dev_priv->display.wm.cur_latency[3] == 0)
+ return;
+
+ dev_priv->display.wm.pri_latency[3] = 0;
+ dev_priv->display.wm.spr_latency[3] = 0;
+ dev_priv->display.wm.cur_latency[3] = 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "LP3 watermarks disabled due to potential for lost interrupts\n");
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+}
+
+static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
+{
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else if (DISPLAY_VER(dev_priv) >= 6)
+ snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else
+ ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+
+ memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
+
+ intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
+
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+
+ if (DISPLAY_VER(dev_priv) == 6) {
+ snb_wm_latency_quirk(dev_priv);
+ snb_wm_lp3_irq_quirk(dev_priv);
+ }
+}
+
+static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
+ struct intel_pipe_wm *pipe_wm)
+{
+ /* LP0 watermark maximums depend on this pipe alone */
+ const struct intel_wm_config config = {
+ .num_pipes_active = 1,
+ .sprites_enabled = pipe_wm->sprites_enabled,
+ .sprites_scaled = pipe_wm->sprites_scaled,
+ };
+ struct ilk_wm_maximums max;
+
+ /* LP0 watermarks always use 1/2 DDB partitioning */
+ ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+
+ /* At least LP0 must be valid */
+ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
+ drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Compute new watermarks for the pipe */
+static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_pipe_wm *pipe_wm;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
+ const struct intel_plane_state *pristate = NULL;
+ const struct intel_plane_state *sprstate = NULL;
+ const struct intel_plane_state *curstate = NULL;
+ struct ilk_wm_maximums max;
+ int level, usable_level;
+
+ pipe_wm = &crtc_state->wm.ilk.optimal;
+
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ pristate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ sprstate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ curstate = plane_state;
+ }
+
+ pipe_wm->pipe_enabled = crtc_state->hw.active;
+ pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
+ pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
+
+ usable_level = dev_priv->display.wm.num_levels - 1;
+
+ /* ILK/SNB: LP2+ watermarks only w/o sprites */
+ if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
+ usable_level = 1;
+
+ /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
+ if (pipe_wm->sprites_scaled)
+ usable_level = 0;
+
+ memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+ ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
+ pristate, sprstate, curstate, &pipe_wm->wm[0]);
+
+ if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+ return -EINVAL;
+
+ ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+
+ for (level = 1; level <= usable_level; level++) {
+ struct intel_wm_level *wm = &pipe_wm->wm[level];
+
+ ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
+ pristate, sprstate, curstate, wm);
+
+ /*
+ * Disable any watermark level that exceeds the
+ * register maximums since such watermarks are
+ * always invalid.
+ */
+ if (!ilk_validate_wm_level(level, &max, wm)) {
+ memset(wm, 0, sizeof(*wm));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Build a set of 'intermediate' watermark values that satisfy both the old
+ * state and the new state. These can be programmed to the hardware
+ * immediately.
+ */
+static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
+ const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
+ int level;
+
+ /*
+ * Start with the final, target watermarks, then combine with the
+ * currently active watermarks to get values that are safe both before
+ * and after the vblank.
+ */
+ *a = new_crtc_state->wm.ilk.optimal;
+ if (!new_crtc_state->hw.active ||
+ intel_crtc_needs_modeset(new_crtc_state) ||
+ state->skip_intermediate_wm)
+ return 0;
+
+ a->pipe_enabled |= b->pipe_enabled;
+ a->sprites_enabled |= b->sprites_enabled;
+ a->sprites_scaled |= b->sprites_scaled;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct intel_wm_level *a_wm = &a->wm[level];
+ const struct intel_wm_level *b_wm = &b->wm[level];
+
+ a_wm->enable &= b_wm->enable;
+ a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
+ a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
+ a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
+ a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
+ }
+
+ /*
+ * We need to make sure that these merged watermark values are
+ * actually a valid configuration themselves. If they're not,
+ * there's no safe way to transition from the old state to
+ * the new state, so we need to fail the atomic transaction.
+ */
+ if (!ilk_validate_pipe_wm(dev_priv, a))
+ return -EINVAL;
+
+ /*
+ * If our intermediate WMs are identical to the final WMs, then we can
+ * omit the post-vblank programming; only update if it's different.
+ */
+ if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
+ new_crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+ int level,
+ struct intel_wm_level *ret_wm)
+{
+ const struct intel_crtc *crtc;
+
+ ret_wm->enable = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
+ const struct intel_wm_level *wm = &active->wm[level];
+
+ if (!active->pipe_enabled)
+ continue;
+
+ /*
+ * The watermark values may have been used in the past,
+ * so we must maintain them in the registers for some
+ * time even if the level is now disabled.
+ */
+ if (!wm->enable)
+ ret_wm->enable = false;
+
+ ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+ ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+ ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+ ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
+ }
+}
+
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+ const struct intel_wm_config *config,
+ const struct ilk_wm_maximums *max,
+ struct intel_pipe_wm *merged)
+{
+ int level, num_levels = dev_priv->display.wm.num_levels;
+ int last_enabled_level = num_levels - 1;
+
+ /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
+ if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
+ config->num_pipes_active > 1)
+ last_enabled_level = 0;
+
+ /* ILK: FBC WM must be disabled always */
+ merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
+
+ /* merge each WM1+ level */
+ for (level = 1; level < num_levels; level++) {
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ ilk_merge_wm_level(dev_priv, level, wm);
+
+ if (level > last_enabled_level)
+ wm->enable = false;
+ else if (!ilk_validate_wm_level(level, max, wm))
+ /* make sure all following levels get disabled */
+ last_enabled_level = level - 1;
+
+ /*
+ * The spec says it is preferred to disable
+ * FBC WMs instead of disabling a WM level.
+ */
+ if (wm->fbc_val > max->fbc) {
+ if (wm->enable)
+ merged->fbc_wm_enabled = false;
+ wm->fbc_val = 0;
+ }
+ }
+
+ /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
+ if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
+ dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
+ for (level = 2; level < num_levels; level++) {
+ struct intel_wm_level *wm = &merged->wm[level];
+
+ wm->enable = false;
+ }
+ }
+}
+
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+ /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
+ return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
+}
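+
+/*
+ * i.e. LP1 always maps to level 1; when wm[4] is enabled LP2/LP3 map
+ * to levels 3/4 (skipping level 2), otherwise to levels 2/3.
+ */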
+
+/* The value we need to program into the WM_LPx latency field */
+static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+ int level)
+{
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ return 2 * level;
+ else
+ return dev_priv->display.wm.pri_latency[level];
+}
+
+static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+ const struct intel_pipe_wm *merged,
+ enum intel_ddb_partitioning partitioning,
+ struct ilk_wm_values *results)
+{
+ struct intel_crtc *crtc;
+ int level, wm_lp;
+
+ results->enable_fbc_wm = merged->fbc_wm_enabled;
+ results->partitioning = partitioning;
+
+ /* LP1+ register values */
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ const struct intel_wm_level *r;
+
+ level = ilk_wm_lp_to_level(wm_lp, merged);
+
+ r = &merged->wm[level];
+
+ /*
+ * Maintain the watermark values even if the level is
+ * disabled. Doing otherwise could cause underruns.
+ */
+ results->wm_lp[wm_lp - 1] =
+ WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_PRIMARY(r->pri_val) |
+ WM_LP_CURSOR(r->cur_val);
+
+ if (r->enable)
+ results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
+
+ if (DISPLAY_VER(dev_priv) >= 8)
+ results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
+ else
+ results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
+
+ results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
+
+ /*
+ * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
+ * level is disabled. Doing otherwise could cause underruns.
+ */
+ if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
+ drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
+ results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
+ }
+ }
+
+ /* LP0 register values */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+ const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
+ const struct intel_wm_level *r = &pipe_wm->wm[0];
+
+ if (drm_WARN_ON(&dev_priv->drm, !r->enable))
+ continue;
+
+ results->wm_pipe[pipe] =
+ WM0_PIPE_PRIMARY(r->pri_val) |
+ WM0_PIPE_SPRITE(r->spr_val) |
+ WM0_PIPE_CURSOR(r->cur_val);
+ }
+}
+
+/*
+ * Find the result with the highest level enabled. Check for enable_fbc_wm in
+ * case both are at the same level. Prefer r1 in case they're the same.
+ */
+static struct intel_pipe_wm *
+ilk_find_best_result(struct drm_i915_private *dev_priv,
+ struct intel_pipe_wm *r1,
+ struct intel_pipe_wm *r2)
+{
+ int level, level1 = 0, level2 = 0;
+
+ for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
+ if (r1->wm[level].enable)
+ level1 = level;
+ if (r2->wm[level].enable)
+ level2 = level;
+ }
+
+ if (level1 == level2) {
+ if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
+ return r2;
+ else
+ return r1;
+ } else if (level1 > level2) {
+ return r1;
+ } else {
+ return r2;
+ }
+}
+
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
+
+static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+ const struct ilk_wm_values *old,
+ const struct ilk_wm_values *new)
+{
+ unsigned int dirty = 0;
+ enum pipe pipe;
+ int wm_lp;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+ dirty |= WM_DIRTY_PIPE(pipe);
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+ }
+
+ if (old->enable_fbc_wm != new->enable_fbc_wm) {
+ dirty |= WM_DIRTY_FBC;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ if (old->partitioning != new->partitioning) {
+ dirty |= WM_DIRTY_DDB;
+ /* Must disable LP1+ watermarks too */
+ dirty |= WM_DIRTY_LP_ALL;
+ }
+
+ /* LP1+ watermarks already deemed dirty, no need to continue */
+ if (dirty & WM_DIRTY_LP_ALL)
+ return dirty;
+
+ /* Find the lowest numbered LP1+ watermark in need of an update... */
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+ old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+ break;
+ }
+
+ /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+ for (; wm_lp <= 3; wm_lp++)
+ dirty |= WM_DIRTY_LP(wm_lp);
+
+ return dirty;
+}
+
+static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+ unsigned int dirty)
+{
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ bool changed = false;
+
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
+ previous->wm_lp[2] &= ~WM_LP_ENABLE;
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
+ previous->wm_lp[1] &= ~WM_LP_ENABLE;
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
+ changed = true;
+ }
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
+ previous->wm_lp[0] &= ~WM_LP_ENABLE;
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
+ changed = true;
+ }
+
+ /*
+ * Don't touch WM_LP_SPRITE_ENABLE here.
+ * Doing so could cause underruns.
+ */
+
+ return changed;
+}
+
+/*
+ * The spec says we shouldn't write when we don't need to, because every write
+ * causes WMs to be re-evaluated, expending some power.
+ */
+static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+ struct ilk_wm_values *results)
+{
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ unsigned int dirty;
+
+ dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+ if (!dirty)
+ return;
+
+ _ilk_disable_lp_wm(dev_priv, dirty);
+
+ if (dirty & WM_DIRTY_PIPE(PIPE_A))
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ if (dirty & WM_DIRTY_PIPE(PIPE_B))
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ if (dirty & WM_DIRTY_PIPE(PIPE_C))
+ intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+
+ if (dirty & WM_DIRTY_DDB) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ WM_MISC_DATA_PARTITION_5_6);
+ else
+ intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ DISP_DATA_PARTITION_5_6);
+ }
+
+ if (dirty & WM_DIRTY_FBC)
+ intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+ results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+
+ if (dirty & WM_DIRTY_LP(1) &&
+ previous->wm_lp_spr[0] != results->wm_lp_spr[0])
+ intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
+
+ if (DISPLAY_VER(dev_priv) >= 7) {
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
+ intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
+ intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
+ }
+
+ if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
+ intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
+ if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
+ intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
+ if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
+ intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
+
+ dev_priv->display.wm.hw = *results;
+}
+
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
+{
+ return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+}
+
+static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+ struct intel_wm_config *config)
+{
+ struct intel_crtc *crtc;
+
+ /* Compute the currently _active_ config */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
+
+ if (!wm->pipe_enabled)
+ continue;
+
+ config->sprites_enabled |= wm->sprites_enabled;
+ config->sprites_scaled |= wm->sprites_scaled;
+ config->num_pipes_active++;
+ }
+}
+
+static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+ struct ilk_wm_maximums max;
+ struct intel_wm_config config = {};
+ struct ilk_wm_values results = {};
+ enum intel_ddb_partitioning partitioning;
+
+ ilk_compute_wm_config(dev_priv, &config);
+
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+
+ /* 5/6 split only in single pipe config on IVB+ */
+ if (DISPLAY_VER(dev_priv) >= 7 &&
+ config.num_pipes_active == 1 && config.sprites_enabled) {
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+
+ best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+ } else {
+ best_lp_wm = &lp_wm_1_2;
+ }
+
+ partitioning = (best_lp_wm == &lp_wm_1_2) ?
+ INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
+
+ ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+
+ ilk_write_wm_values(dev_priv, &results);
+}
+
+static void ilk_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
+ ilk_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void ilk_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+ crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
+ ilk_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
+ enum pipe pipe = crtc->pipe;
+
+ hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
+
+ memset(active, 0, sizeof(*active));
+
+ active->pipe_enabled = crtc->active;
+
+ if (active->pipe_enabled) {
+ u32 tmp = hw->wm_pipe[pipe];
+
+ /*
+ * For active pipes LP0 watermark is marked as
+ * enabled, and LP1+ watermarks as disabled since
+ * we can't really reverse compute them in case
+ * multiple pipes are active.
+ */
+ active->wm[0].enable = true;
+ active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
+ active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
+ active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
+ } else {
+ int level;
+
+ /*
+ * For inactive pipes, all watermark levels
+ * should be marked as enabled but zeroed,
+ * which is what we'd compute them to.
+ */
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ active->wm[level].enable = true;
+ }
+
+ crtc->wm.active.ilk = *active;
+}
+
+static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(state->dev, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (crtc_state->hw.active) {
+ /*
+ * Preserve the inherited flag to avoid
+ * taking the full modeset path.
+ */
+ crtc_state->inherited = true;
+ }
+ }
+
+ drm_for_each_plane(plane, state->dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate what we think the watermarks should be for the state we've read
+ * out of the hardware and then immediately program those watermarks so that
+ * we ensure the hardware settings match our internal state.
+ *
+ * We can calculate what we think WMs should be by creating a duplicate of the
+ * current state (which was constructed during hardware readout) and running it
+ * through the atomic check code to calculate new watermark values in the
+ * state object.
+ */
+void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct drm_atomic_state *state;
+ struct intel_atomic_state *intel_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+ int i;
+
+ /* Only supported on platforms that use atomic watermark design */
+ if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+ return;
+
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (drm_WARN_ON(&dev_priv->drm, !state))
+ return;
+
+ intel_state = to_intel_atomic_state(state);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
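+ /*
+ * Standard modeset locking dance: on -EDEADLK below we
+ * clear the state, back off and retry from here.
+ */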
+retry:
+ state->acquire_ctx = &ctx;
+
+ /*
+ * Hardware readout is the only time we don't want to calculate
+ * intermediate watermarks (since we don't trust the current
+ * watermarks).
+ */
+ if (!HAS_GMCH(dev_priv))
+ intel_state->skip_intermediate_wm = true;
+
+ ret = ilk_sanitize_watermarks_add_affected(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_atomic_check(&dev_priv->drm, state);
+ if (ret)
+ goto fail;
+
+ /* Write calculated watermark values back */
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ crtc_state->wm.need_postvbl_update = true;
+ intel_optimize_watermarks(intel_state, crtc);
+
+ to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
+ }
+
+fail:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ /*
+ * If we fail here, it means that the hardware appears to be
+ * programmed in a way that shouldn't be possible, given our
+ * understanding of watermark requirements. This might mean a
+ * mistake in the hardware readout code or a mistake in the
+ * watermark calculations for a given platform. Raise a WARN
+ * so that this is noticeable.
+ *
+ * If this actually happens, we'll have to just leave the
+ * BIOS-programmed watermarks untouched and hope for the best.
+ */
+ drm_WARN(&dev_priv->drm, ret,
+ "Could not determine valid watermarks for inherited state\n");
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
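+/*
+ * Helpers to extract a single plane's watermark field from
+ * the packed DSPFW registers; the _VLV variant uses the
+ * wider VLV/CHV field masks.
+ */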
+#define _FW_WM(value, plane) \
+ (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
+#define _FW_WM_VLV(value, plane) \
+ (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
+
+static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+ struct g4x_wm_values *wm)
+{
+ u32 tmp;
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
+ wm->sr.plane = _FW_WM(tmp, SR);
+ wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
+ wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
+ wm->sr.fbc = _FW_WM(tmp, FBC_SR);
+ wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
+ wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
+ wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+ wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
+ wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
+}
+
+static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+ struct vlv_wm_values *wm)
+{
+ enum pipe pipe;
+ u32 tmp;
+
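+ /*
+ * The per-pipe DDL registers hold each plane's drain
+ * latency along with the precision select bit.
+ */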
+ for_each_pipe(dev_priv, pipe) {
+ tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
+
+ wm->ddl[pipe].plane[PLANE_PRIMARY] =
+ (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].plane[PLANE_CURSOR] =
+ (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].plane[PLANE_SPRITE0] =
+ (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ wm->ddl[pipe].plane[PLANE_SPRITE1] =
+ (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+ }
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
+ wm->sr.plane = _FW_WM(tmp, SR);
+ wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
+ wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+ wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
+ wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
+ wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
+
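+ /* DSPHOWM holds the high bits that extend each watermark field */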
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
+ } else {
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
+
+ tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
+ }
+}
+
+#undef _FW_WM
+#undef _FW_WM_VLV
+
+static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
+ struct intel_crtc *crtc;
+
+ g4x_read_wm_values(dev_priv, wm);
+
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct g4x_wm_state *active = &crtc->wm.active.g4x;
+ struct g4x_pipe_wm *raw;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+ int level, max_level;
+
+ active->cxsr = wm->cxsr;
+ active->hpll_en = wm->hpll_en;
+ active->fbc_en = wm->fbc_en;
+
+ active->sr = wm->sr;
+ active->hpll = wm->hpll;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ active->wm.plane[plane_id] =
+ wm->pipe[pipe].plane[plane_id];
+ }
+
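+ /*
+ * The deepest usable level follows from the enabled
+ * power saving features: HPLL requires CxSR.
+ */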
+ if (wm->cxsr && wm->hpll_en)
+ max_level = G4X_WM_LEVEL_HPLL;
+ else if (wm->cxsr)
+ max_level = G4X_WM_LEVEL_SR;
+ else
+ max_level = G4X_WM_LEVEL_NORMAL;
+
+ level = G4X_WM_LEVEL_NORMAL;
+ raw = &crtc_state->wm.g4x.raw[level];
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ raw->plane[plane_id] = active->wm.plane[plane_id];
+
+ level = G4X_WM_LEVEL_SR;
+ if (level > max_level)
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ raw->plane[PLANE_PRIMARY] = active->sr.plane;
+ raw->plane[PLANE_CURSOR] = active->sr.cursor;
+ raw->plane[PLANE_SPRITE0] = 0;
+ raw->fbc = active->sr.fbc;
+
+ level = G4X_WM_LEVEL_HPLL;
+ if (level > max_level)
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ raw->plane[PLANE_PRIMARY] = active->hpll.plane;
+ raw->plane[PLANE_CURSOR] = active->hpll.cursor;
+ raw->plane[PLANE_SPRITE0] = 0;
+ raw->fbc = active->hpll.fbc;
+
+ level++;
+ out:
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ g4x_raw_plane_wm_set(crtc_state, level,
+ plane_id, USHRT_MAX);
+ g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
+
+ g4x_invalidate_wms(crtc, active, level);
+
+ crtc_state->wm.g4x.optimal = *active;
+ crtc_state->wm.g4x.intermediate = *active;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0]);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
+ wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
+ drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
+ str_yes_no(wm->fbc_en));
+}
+
+static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv, plane->pipe);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ enum plane_id plane_id = plane->id;
+ int level;
+
+ if (plane_state->uapi.visible)
+ continue;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.g4x.raw[level];
+
+ raw->plane[plane_id] = 0;
+
+ if (plane_id == PLANE_PRIMARY)
+ raw->fbc = 0;
+ }
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int ret;
+
+ ret = _g4x_compute_pipe_wm(crtc_state);
+ drm_WARN_ON(&dev_priv->drm, ret);
+
+ crtc_state->wm.g4x.intermediate =
+ crtc_state->wm.g4x.optimal;
+ crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ }
+
+ g4x_program_watermarks(dev_priv);
+
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ g4x_wm_get_hw_state(i915);
+ g4x_wm_sanitize(i915);
+}
+
+static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
+ struct intel_crtc *crtc;
+ u32 val;
+
+ vlv_read_wm_values(dev_priv, wm);
+
+ wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->level = VLV_WM_LEVEL_PM2;
+
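+ /*
+ * CHV may support the deeper PM5 and DDR DVFS levels;
+ * ask the Punit which ones are actually usable.
+ */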
+ if (IS_CHERRYVIEW(dev_priv)) {
+ vlv_punit_get(dev_priv);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ if (val & DSP_MAXFIFO_PM5_ENABLE)
+ wm->level = VLV_WM_LEVEL_PM5;
+
+ /*
+ * If DDR DVFS is disabled in the BIOS, Punit
+ * will never ack the request. So if that happens
+ * assume we don't have to enable/disable DDR DVFS
+ * dynamically. To test that just set the REQ_ACK
+ * bit to poke the Punit, but don't change the
+ * HIGH/LOW bits so that we don't actually change
+ * the current state.
+ */
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Punit not acking DDR DVFS request, "
+ "assuming DDR DVFS is disabled\n");
+ dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
+ } else {
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+ wm->level = VLV_WM_LEVEL_DDR_DVFS;
+ }
+
+ vlv_punit_put(dev_priv);
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct vlv_wm_state *active = &crtc->wm.active.vlv;
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+ int level;
+
+ vlv_get_fifo_size(crtc_state);
+
+ active->num_levels = wm->level + 1;
+ active->cxsr = wm->cxsr;
+
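+ /*
+ * Reconstruct the raw (inverted) watermarks from the
+ * register values and the current FIFO split.
+ */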
+ for (level = 0; level < active->num_levels; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+
+ active->sr[level].plane = wm->sr.plane;
+ active->sr[level].cursor = wm->sr.cursor;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ active->wm[level].plane[plane_id] =
+ wm->pipe[pipe].plane[plane_id];
+
+ raw->plane[plane_id] =
+ vlv_invert_wm_value(active->wm[level].plane[plane_id],
+ fifo_state->plane[plane_id]);
+ }
+ }
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ vlv_raw_plane_wm_set(crtc_state, level,
+ plane_id, USHRT_MAX);
+ vlv_invalidate_wms(crtc, active, level);
+
+ crtc_state->wm.vlv.optimal = *active;
+ crtc_state->wm.vlv.intermediate = *active;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0],
+ wm->pipe[pipe].plane[PLANE_SPRITE1]);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+}
+
+static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv, plane->pipe);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ enum plane_id plane_id = plane->id;
+ int level;
+
+ if (plane_state->uapi.visible)
+ continue;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+
+ raw->plane[plane_id] = 0;
+ }
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int ret;
+
+ ret = _vlv_compute_pipe_wm(crtc_state);
+ drm_WARN_ON(&dev_priv->drm, ret);
+
+ crtc_state->wm.vlv.intermediate =
+ crtc_state->wm.vlv.optimal;
+ crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ }
+
+ vlv_program_watermarks(dev_priv);
+
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
+}
+
+static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ vlv_wm_get_hw_state(i915);
+ vlv_wm_sanitize(i915);
+}
+
+/*
+ * FIXME should probably kill this and improve
+ * the real watermark readout/sanitization instead
+ */
+static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+{
+ intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
+ intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
+ intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
+
+ /*
+ * Don't touch WM_LP_SPRITE_ENABLE here.
+ * Doing so could cause underruns.
+ */
+}
+
+static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_crtc *crtc;
+
+ ilk_init_lp_watermarks(dev_priv);
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ ilk_pipe_wm_get_hw_state(crtc);
+
+ hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
+
+ hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
+ if (DISPLAY_VER(dev_priv) >= 7) {
+ hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
+ }
+
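+ /* DDB partitioning selection lives in different registers on HSW/BDW vs. IVB */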
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+ WM_MISC_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+ else if (IS_IVYBRIDGE(dev_priv))
+ hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+ DISP_DATA_PARTITION_5_6) ?
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+ hw->enable_fbc_wm =
+ !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
+static const struct intel_wm_funcs ilk_wm_funcs = {
+ .compute_pipe_wm = ilk_compute_pipe_wm,
+ .compute_intermediate_wm = ilk_compute_intermediate_wm,
+ .initial_watermarks = ilk_initial_watermarks,
+ .optimize_watermarks = ilk_optimize_watermarks,
+ .get_hw_state = ilk_wm_get_hw_state,
+};
+
+static const struct intel_wm_funcs vlv_wm_funcs = {
+ .compute_pipe_wm = vlv_compute_pipe_wm,
+ .compute_intermediate_wm = vlv_compute_intermediate_wm,
+ .initial_watermarks = vlv_initial_watermarks,
+ .optimize_watermarks = vlv_optimize_watermarks,
+ .atomic_update_watermarks = vlv_atomic_update_fifo,
+ .get_hw_state = vlv_wm_get_hw_state_and_sanitize,
+};
+
+static const struct intel_wm_funcs g4x_wm_funcs = {
+ .compute_pipe_wm = g4x_compute_pipe_wm,
+ .compute_intermediate_wm = g4x_compute_intermediate_wm,
+ .initial_watermarks = g4x_initial_watermarks,
+ .optimize_watermarks = g4x_optimize_watermarks,
+ .get_hw_state = g4x_wm_get_hw_state_and_sanitize,
+};
+
+static const struct intel_wm_funcs pnv_wm_funcs = {
+ .update_wm = pnv_update_wm,
+};
+
+static const struct intel_wm_funcs i965_wm_funcs = {
+ .update_wm = i965_update_wm,
+};
+
+static const struct intel_wm_funcs i9xx_wm_funcs = {
+ .update_wm = i9xx_update_wm,
+};
+
+static const struct intel_wm_funcs i845_wm_funcs = {
+ .update_wm = i845_update_wm,
+};
+
+static const struct intel_wm_funcs nop_funcs = {
+};
+
+void i9xx_wm_init(struct drm_i915_private *dev_priv)
+{
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ ilk_setup_wm_latency(dev_priv);
+ dev_priv->display.funcs.wm = &ilk_wm_funcs;
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ vlv_setup_wm_latency(dev_priv);
+ dev_priv->display.funcs.wm = &vlv_wm_funcs;
+ } else if (IS_G4X(dev_priv)) {
+ g4x_setup_wm_latency(dev_priv);
+ dev_priv->display.funcs.wm = &g4x_wm_funcs;
+ } else if (IS_PINEVIEW(dev_priv)) {
+ if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ drm_info(&dev_priv->drm,
+ "failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ intel_set_memory_cxsr(dev_priv, false);
+ dev_priv->display.funcs.wm = &nop_funcs;
+ } else {
+ dev_priv->display.funcs.wm = &pnv_wm_funcs;
+ }
+ } else if (DISPLAY_VER(dev_priv) == 4) {
+ dev_priv->display.funcs.wm = &i965_wm_funcs;
+ } else if (DISPLAY_VER(dev_priv) == 3) {
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ } else if (DISPLAY_VER(dev_priv) == 2) {
+ if (INTEL_NUM_PIPES(dev_priv) == 1)
+ dev_priv->display.funcs.wm = &i845_wm_funcs;
+ else
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ } else {
+ drm_err(&dev_priv->drm,
+ "unexpected fall-through in %s\n", __func__);
+ dev_priv->display.funcs.wm = &nop_funcs;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
new file mode 100644
index 000000000000..a7875cbcd05a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I9XX_WM_H__
+#define __I9XX_WM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+int ilk_wm_max_level(const struct drm_i915_private *i915);
+bool ilk_disable_lp_wm(struct drm_i915_private *i915);
+void ilk_wm_sanitize(struct drm_i915_private *i915);
+bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
+void i9xx_wm_init(struct drm_i915_private *i915);
+
+#endif /* __I9XX_WM_H__ */
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 468a792e6a40..4ff10b00ffbd 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -45,6 +45,7 @@
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -207,7 +208,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 tmp, mode_flags;
+ u32 mode_flags;
enum port port;
mode_flags = crtc_state->mode_flags;
@@ -224,9 +225,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
else
return;
- tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
- tmp |= DSI_FRAME_UPDATE_REQUEST;
- intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
+ intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST);
}
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
@@ -234,7 +233,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
- u32 tmp;
+ u32 tmp, mask, val;
int lane;
for_each_dsi_phy(phy, intel_dsi->phys) {
@@ -242,56 +241,35 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programming
*/
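+ /*
+ * Build each mask/val pair once and apply it to both the
+ * GRP (all lanes) write and the AUX register rmw.
+ */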
+ mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK;
+ val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE |
+ RTERM_SELECT(0x6);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
- tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
- tmp |= SCALING_MODE_SEL(0x2);
- tmp |= TAP2_DISABLE | TAP3_DISABLE;
- tmp |= RTERM_SELECT(0x6);
+ tmp &= ~mask;
+ tmp |= val;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
- tmp |= SCALING_MODE_SEL(0x2);
- tmp |= TAP2_DISABLE | TAP3_DISABLE;
- tmp |= RTERM_SELECT(0x6);
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
-
+ mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK;
+ val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) |
+ RCOMP_SCALAR(0x98);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
- tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
- RCOMP_SCALAR_MASK);
- tmp |= SWING_SEL_UPPER(0x2);
- tmp |= SWING_SEL_LOWER(0x2);
- tmp |= RCOMP_SCALAR(0x98);
+ tmp &= ~mask;
+ tmp |= val;
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
- tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
- RCOMP_SCALAR_MASK);
- tmp |= SWING_SEL_UPPER(0x2);
- tmp |= SWING_SEL_LOWER(0x2);
- tmp |= RCOMP_SCALAR(0x98);
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
-
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
- tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
- CURSOR_COEFF_MASK);
- tmp |= POST_CURSOR_1(0x0);
- tmp |= POST_CURSOR_2(0x0);
- tmp |= CURSOR_COEFF(0x3f);
- intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
-
- for (lane = 0; lane <= 3; lane++) {
- /* Bspec: must not use GRP register for write */
- tmp = intel_de_read(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy));
- tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
- CURSOR_COEFF_MASK);
- tmp |= POST_CURSOR_1(0x0);
- tmp |= POST_CURSOR_2(0x0);
- tmp |= CURSOR_COEFF(0x3f);
- intel_de_write(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy), tmp);
- }
+ mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK;
+ val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) |
+ CURSOR_COEFF(0x3f);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val);
+
+ /* Bspec: must not use GRP register for write */
+ for (lane = 0; lane <= 3; lane++)
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
+ mask, val);
}
}
@@ -300,9 +278,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1;
- dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+ /* FIXME: Move all DSS handling to intel_vdsc.c */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
+ } else {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ }
+
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -310,7 +300,6 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- u32 dss_ctl2;
u16 hactive = adjusted_mode->crtc_hdisplay;
u16 dl_buffer_depth;
@@ -323,16 +312,14 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
- dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
- dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
+ intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK,
+ RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth));
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
- intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
}
/* aka DSI 8X clock */
@@ -412,13 +399,10 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
- tmp |= COMBO_PHY_MODE_DSI;
- intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
+ 0, COMBO_PHY_MODE_DSI);
get_dsi_io_power_domains(dev_priv, intel_dsi);
}
@@ -444,26 +428,16 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* Step 4b(i) set loadgen select for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
- tmp &= ~LOADGEN_SELECT;
- intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
- for (lane = 0; lane <= 3; lane++) {
- tmp = intel_de_read(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy));
- tmp &= ~LOADGEN_SELECT;
- if (lane != 2)
- tmp |= LOADGEN_SELECT;
- intel_de_write(dev_priv,
- ICL_PORT_TX_DW4_LN(lane, phy), tmp);
- }
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0);
+ for (lane = 0; lane <= 3; lane++)
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
+ LOADGEN_SELECT, lane != 2 ? LOADGEN_SELECT : 0);
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
- tmp &= ~FRC_LATENCY_OPTIM_MASK;
- tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy),
+ FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5));
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
@@ -471,12 +445,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) {
- tmp = intel_de_read(dev_priv,
- ICL_PORT_PCS_DW1_AUX(phy));
- tmp &= ~LATENCY_OPTIM_MASK;
- tmp |= LATENCY_OPTIM_VAL(0);
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
- tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
+ LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
tmp = intel_de_read(dev_priv,
ICL_PORT_PCS_DW1_LN(0, phy));
@@ -501,9 +471,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~COMMON_KEEPER_EN;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
- tmp &= ~COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0);
}
/*
@@ -511,20 +479,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* Note: loadgen select programming is done
* as part of lane phy sequence configuration
*/
- for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
- tmp |= SUS_CLOCK_CONFIG;
- intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp);
- }
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG);
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0);
}
/* Program swing and de-emphasis */
@@ -535,9 +498,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp |= TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
- tmp |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN);
}
}
@@ -545,13 +506,10 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- tmp |= DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
@@ -567,17 +525,13 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
enum phy phy;
/* Program T-INIT master registers */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
- tmp &= ~DSI_T_INIT_MASTER_MASK;
- tmp |= intel_dsi->init_count;
- intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port),
+ DSI_T_INIT_MASTER_MASK, intel_dsi->init_count);
/* Program DPHY clock lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -608,31 +562,22 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) == 11) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv,
- DPHY_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- intel_de_write(dev_priv,
- DPHY_TA_TIMING_PARAM(port),
- tmp);
+ intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port),
+ TA_SURE_MASK,
+ TA_SURE_OVERRIDE | TA_SURE(0));
/* shadow register inside display core */
- tmp = intel_de_read(dev_priv,
- DSI_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- intel_de_write(dev_priv,
- DSI_TA_TIMING_PARAM(port), tmp);
+ intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port),
+ TA_SURE_MASK,
+ TA_SURE_OVERRIDE | TA_SURE(0));
}
}
}
if (IS_JSL_EHL(dev_priv)) {
- for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy));
- tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
- intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp);
- }
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy),
+ 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
}
}
@@ -824,11 +769,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans));
- tmp |= PORT_SYNC_MODE_ENABLE;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans),
+ 0, PORT_SYNC_MODE_ENABLE);
}
/* configure stream splitting */
@@ -958,8 +900,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
/* program TRANS_HTOTAL register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, HTOTAL(dsi_trans),
- (hactive - 1) | ((htotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_HTOTAL(dsi_trans),
+ HACTIVE(hactive - 1) | HTOTAL(htotal - 1));
}
/* TRANS_HSYNC register to be programmed only for video mode */
@@ -981,8 +923,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, HSYNC(dsi_trans),
- (hsync_start - 1) | ((hsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_HSYNC(dsi_trans),
+ HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1));
}
}
@@ -995,8 +937,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
*/
- intel_de_write(dev_priv, VTOTAL(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VTOTAL(dsi_trans),
+ VACTIVE(vactive - 1) | VTOTAL(vtotal - 1));
}
if (vsync_end < vsync_start || vsync_end > vtotal)
@@ -1009,8 +951,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNC(dsi_trans),
- (vsync_start - 1) | ((vsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VSYNC(dsi_trans),
+ VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1));
}
}
@@ -1023,17 +965,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans),
+ intel_de_write(dev_priv, TRANS_VSYNCSHIFT(dsi_trans),
vsync_shift);
}
}
- /* program TRANS_VBLANK register, should be same as vtotal programmed */
+ /*
+ * program TRANS_VBLANK register, should be same as vtotal programmed
+ *
+ * FIXME get rid of these local hacks and do it right,
+ * this will not handle e.g. delayed vblank correctly.
+ */
if (DISPLAY_VER(dev_priv) >= 12) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, VBLANK(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VBLANK(dsi_trans),
+ VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1));
}
}
}
@@ -1044,17 +991,14 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- tmp |= PIPECONF_ENABLE;
- intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), 0, TRANSCONF_ENABLE);
/* wait for transcoder to be enabled */
- if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
- PIPECONF_STATE_ENABLE, 10))
+ if (intel_de_wait_for_set(dev_priv, TRANSCONF(dsi_trans),
+ TRANSCONF_STATE_ENABLE, 10))
drm_err(&dev_priv->drm,
"DSI transcoder not enabled\n");
}
@@ -1067,7 +1011,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
+ u32 hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
/*
* escape clock count calculation:
@@ -1087,26 +1031,23 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* program hst_tx_timeout */
- tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans));
- tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
- tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
- intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans),
+ HSTX_TIMEOUT_VALUE_MASK,
+ HSTX_TIMEOUT_VALUE(hs_tx_timeout));
/* FIXME: DSI_CALIB_TO */
/* program lp_rx_host timeout */
- tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans));
- tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
- tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
- intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans),
+ LPRX_TIMEOUT_VALUE_MASK,
+ LPRX_TIMEOUT_VALUE(lp_rx_timeout));
/* FIXME: DSI_PWAIT_TO */
/* program turn around timeout */
- tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans));
- tmp &= ~TA_TIMEOUT_VALUE_MASK;
- tmp |= TA_TIMEOUT_VALUE(ta_timeout);
- intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans),
+ TA_TIMEOUT_VALUE_MASK,
+ TA_TIMEOUT_VALUE(ta_timeout));
}
}
@@ -1310,19 +1251,16 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
/* disable transcoder */
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- tmp &= ~PIPECONF_ENABLE;
- intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), TRANSCONF_ENABLE, 0);
/* wait for transcoder to be disabled */
- if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
- PIPECONF_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dsi_trans),
+ TRANSCONF_STATE_ENABLE, 50))
drm_err(&dev_priv->drm,
"DSI trancoder not disabled\n");
}
@@ -1350,11 +1288,9 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* disable periodic update mode */
if (is_cmd_mode(intel_dsi)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
- tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE;
- intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port),
+ DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0);
}
/* put dsi link in ULPS */
@@ -1374,20 +1310,16 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* disable ddi function */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
- tmp &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans),
+ TRANS_DDI_FUNC_ENABLE, 0);
}
/* disable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans));
- tmp &= ~PORT_SYNC_MODE_ENABLE;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans),
+ PORT_SYNC_MODE_ENABLE, 0);
}
}
}
@@ -1396,14 +1328,11 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- u32 tmp;
enum port port;
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
- tmp &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
@@ -1420,7 +1349,6 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
intel_wakeref_t wakeref;
@@ -1434,11 +1362,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
}
/* set mode to DDI */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
- tmp &= ~COMBO_PHY_MODE_DSI;
- intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
+ COMBO_PHY_MODE_DSI, 0);
}
static void gen11_dsi_disable(struct intel_atomic_state *state,
@@ -1574,7 +1500,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+ pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc);
/* Get the details on which TE should be enabled */
if (is_cmd_mode(intel_dsi))
@@ -1754,8 +1680,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
goto out;
}
- tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
- ret = tmp & PIPECONF_ENABLE;
+ tmp = intel_de_read(dev_priv, TRANSCONF(dsi_trans));
+ ret = tmp & TRANSCONF_ENABLE;
}
out:
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 1409bcfb6fd3..40de9f0f171b 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -32,18 +32,17 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
-#include "gt/intel_rps.h"
-
#include "i915_config.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
+#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
-#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_watermark.h"
@@ -363,6 +362,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
crtc_state->scaled_planes &= ~BIT(plane->id);
crtc_state->nv12_planes &= ~BIT(plane->id);
crtc_state->c8_planes &= ~BIT(plane->id);
+ crtc_state->async_flip_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
crtc_state->data_rate_y[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
@@ -582,8 +582,10 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
intel_plane_is_scaled(new_plane_state))))
new_crtc_state->disable_lp_wm = true;
- if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
+ if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) {
new_crtc_state->do_async_flip = true;
+ new_crtc_state->async_flip_planes |= BIT(plane->id);
+ }
return 0;
}
@@ -938,62 +940,62 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
return 0;
}
-struct wait_rps_boost {
- struct wait_queue_entry wait;
-
- struct drm_crtc *crtc;
- struct i915_request *request;
-};
-
-static int do_rps_boost(struct wait_queue_entry *_wait,
- unsigned mode, int sync, void *key)
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
- struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct i915_request *rq = wait->request;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_rect *src = &plane_state->uapi.src;
+ u32 src_x, src_y, src_w, src_h, hsub, vsub;
+ bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
- * If we missed the vblank, but the request is already running it
- * is reasonable to assume that it will complete before the next
- * vblank without our intervention, so leave RPS alone.
+ * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
+ * abuses hsub/vsub so we can't use them here. But as they
+ * are limited to 32bpp RGB formats we don't actually need
+ * to check anything.
*/
- if (!i915_request_started(rq))
- intel_rps_boost(rq);
- i915_request_put(rq);
-
- drm_crtc_vblank_put(wait->crtc);
-
- list_del(&wait->wait.entry);
- kfree(wait);
- return 1;
-}
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
+ return 0;
-static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
- struct wait_rps_boost *wait;
+ /*
+ * Hardware doesn't handle subpixel coordinates.
+ * Adjust to (macro)pixel boundary, but be careful not to
+ * increase the source viewport size, because that could
+ * push the downscaling factor out of bounds.
+ */
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_y = src->y1 >> 16;
+ src_h = drm_rect_height(src) >> 16;
- if (!dma_fence_is_i915(fence))
- return;
+ drm_rect_init(src, src_x << 16, src_y << 16,
+ src_w << 16, src_h << 16);
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
- return;
+ if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
+ hsub = 2;
+ vsub = 2;
+ } else {
+ hsub = fb->format->hsub;
+ vsub = fb->format->vsub;
+ }
- if (drm_crtc_vblank_get(crtc))
- return;
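+ /*
+ * With 90/270 rotation the x/y axes swap, so alignment
+ * must satisfy the stricter of the two subsampling factors
+ * in both directions.
+ */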
+ if (rotated)
+ hsub = vsub = max(hsub, vsub);
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- drm_crtc_vblank_put(crtc);
- return;
+ if (src_x % hsub || src_w % hsub) {
+ drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_x, src_w, hsub, str_yes_no(rotated));
+ return -EINVAL;
}
- wait->request = to_request(dma_fence_get(fence));
- wait->crtc = crtc;
-
- wait->wait.func = do_rps_boost;
- wait->wait.flags = 0;
+ if (src_y % vsub || src_h % vsub) {
+ drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_y, src_h, vsub, str_yes_no(rotated));
+ return -EINVAL;
+ }
- add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+ return 0;
}
/**
@@ -1086,13 +1088,13 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
dma_resv_iter_begin(&cursor, obj->base.resv,
DMA_RESV_USAGE_WRITE);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
+ intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ fence);
}
dma_resv_iter_end(&cursor);
} else {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- new_plane_state->uapi.fence);
+ intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
}
/*
@@ -1103,10 +1105,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* that are not quite steady state without resorting to forcing
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
- if (!state->rps_interactive) {
- intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true);
- state->rps_interactive = true;
- }
+ intel_display_rps_mark_interactive(dev_priv, state, true);
return 0;
@@ -1137,10 +1136,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (!obj)
return;
- if (state->rps_interactive) {
- intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false);
- state->rps_interactive = false;
- }
+ intel_display_rps_mark_interactive(dev_priv, state, false);
/* Should only be called after a successful intel_prepare_plane_fb()! */
intel_plane_unpin_fb(old_plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 74b6d3b169a7..191dad0efc8e 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -62,6 +62,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
struct intel_crtc_state *crtc_state,
int min_scale, int max_scale,
bool can_position);
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
void intel_plane_helper_add(struct intel_plane *plane);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index a9335c856644..3d5a9bbc6fde 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -581,8 +581,7 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
unsigned int hblank_early_prog, samples_room;
unsigned int val;
@@ -592,32 +591,32 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder,
val = intel_de_read(i915, AUD_CONFIG_BE);
if (DISPLAY_VER(i915) == 11)
- val |= HBLANK_EARLY_ENABLE_ICL(pipe);
+ val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder);
else if (DISPLAY_VER(i915) >= 12)
- val |= HBLANK_EARLY_ENABLE_TGL(pipe);
+ val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder);
if (crtc_state->dsc.compression_enable &&
crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
crtc_state->hw.adjusted_mode.vdisplay >= 2160) {
/* Get hblank early enable value required */
- val &= ~HBLANK_START_COUNT_MASK(pipe);
+ val &= ~HBLANK_START_COUNT_MASK(cpu_transcoder);
hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state);
if (hblank_early_prog < 32)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_32);
else if (hblank_early_prog < 64)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_64);
else if (hblank_early_prog < 96)
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_96);
else
- val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
+ val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_128);
/* Get samples room value required */
- val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(cpu_transcoder);
samples_room = calc_samples_room(crtc_state);
if (samples_room < 3)
- val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
+ val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, samples_room);
else /* Program 0 i.e "All Samples available in buffer" */
- val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
+ val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0);
}
intel_de_write(i915, AUD_CONFIG_BE, val);
@@ -812,9 +811,9 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_audio_state *audio_state;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
if (!crtc_state->has_audio)
return;
@@ -832,7 +831,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
mutex_lock(&i915->display.audio.mutex);
- audio_state = &i915->display.audio.state[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
audio_state->encoder = encoder;
BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld));
@@ -842,14 +841,14 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
- /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
- pipe = -1;
+ cpu_transcoder = -1;
acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
- (int)port, (int)pipe);
+ (int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, pipe, port, crtc_state->eld,
+ intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld,
crtc_state->port_clock,
intel_crtc_has_dp_encoder(crtc_state));
}
@@ -871,9 +870,9 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
struct intel_audio_state *audio_state;
enum port port = encoder->port;
- enum pipe pipe = crtc->pipe;
if (!old_crtc_state->has_audio)
return;
@@ -890,7 +889,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
mutex_lock(&i915->display.audio.mutex);
- audio_state = &i915->display.audio.state[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
audio_state->encoder = NULL;
memset(audio_state->eld, 0, sizeof(audio_state->eld));
@@ -899,27 +898,26 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
- /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
- pipe = -1;
+ cpu_transcoder = -1;
acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
- (int)port, (int)pipe);
+ (int)port, (int)cpu_transcoder);
}
- intel_lpe_audio_notify(i915, pipe, port, NULL, 0, false);
+ intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false);
}
static void intel_acomp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_audio_state *audio_state;
- enum pipe pipe = crtc->pipe;
mutex_lock(&i915->display.audio.mutex);
- audio_state = &i915->display.audio.state[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
if (audio_state->encoder)
memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld));
@@ -985,11 +983,7 @@ void intel_audio_cdclk_change_pre(struct drm_i915_private *i915)
static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
{
- if (refclk == 24000)
- aud_ts->m = 12;
- else
- aud_ts->m = 15;
-
+ aud_ts->m = 60;
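+ /* with a fixed M, N scales linearly with cdclk relative to 24 MHz */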
aud_ts->n = cdclk * aud_ts->m / 24000;
}
@@ -1147,27 +1141,27 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
}
/*
- * get the intel audio state according to the parameter port and pipe
- * MST & (pipe >= 0): return the audio.state[pipe].encoder],
+ * get the intel audio state for the given port and cpu_transcoder
+ * MST & (cpu_transcoder >= 0): return the audio.state[cpu_transcoder].encoder,
* when port is matched
- * MST & (pipe < 0): this is invalid
- * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry)
+ * MST & (cpu_transcoder < 0): this is invalid
+ * Non-MST & (cpu_transcoder >= 0): only cpu_transcoder = 0 (the first device entry)
* will get the right intel_encoder with port matched
- * Non-MST & (pipe < 0): get the right intel_encoder with port matched
+ * Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched
*/
static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
- int port, int pipe)
+ int port, int cpu_transcoder)
{
/* MST */
- if (pipe >= 0) {
+ if (cpu_transcoder >= 0) {
struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
if (drm_WARN_ON(&i915->drm,
- pipe >= ARRAY_SIZE(i915->display.audio.state)))
+ cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state)))
return NULL;
- audio_state = &i915->display.audio.state[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
@@ -1176,14 +1170,14 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
}
/* Non-MST */
- if (pipe > 0)
+ if (cpu_transcoder > 0)
return NULL;
- for_each_pipe(i915, pipe) {
+ for_each_cpu_transcoder(i915, cpu_transcoder) {
struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- audio_state = &i915->display.audio.state[pipe];
+ audio_state = &i915->display.audio.state[cpu_transcoder];
encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
@@ -1195,7 +1189,7 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
}
static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
- int pipe, int rate)
+ int cpu_transcoder, int rate)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
struct i915_audio_component *acomp = i915->display.audio.component;
@@ -1211,7 +1205,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
cookie = i915_audio_component_get_power(kdev);
mutex_lock(&i915->display.audio.mutex);
- audio_state = find_audio_state(i915, port, pipe);
+ audio_state = find_audio_state(i915, port, cpu_transcoder);
if (!audio_state) {
drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
err = -ENODEV;
@@ -1223,7 +1217,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
/* FIXME stop using the legacy crtc pointer */
crtc = to_intel_crtc(encoder->base.crtc);
- /* port must be valid now, otherwise the pipe will be invalid */
+ /* port must be valid now, otherwise the cpu_transcoder will be invalid */
acomp->aud_sample_rate[port] = rate;
/* FIXME get rid of the crtc->config stuff */
@@ -1236,7 +1230,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
}
static int i915_audio_component_get_eld(struct device *kdev, int port,
- int pipe, bool *enabled,
+ int cpu_transcoder, bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
@@ -1245,7 +1239,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
mutex_lock(&i915->display.audio.mutex);
- audio_state = find_audio_state(i915, port, pipe);
+ audio_state = find_audio_state(i915, port, cpu_transcoder);
if (!audio_state) {
drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
mutex_unlock(&i915->display.audio.mutex);
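For reference, a hypothetical audio-side caller of these component ops,
showing the -1 convention that now selects Non-MST by cpu_transcoder rather
than by pipe (assuming the usual drm_audio_component layout):

	bool enabled;
	unsigned char eld[128];

	/* Non-MST: pass -1; MST: pass the CPU transcoder index */
	acomp->ops->get_eld(acomp->dev, (int)port, -1,
			    &enabled, eld, sizeof(eld));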
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index a4e4b7f79e4d..2e8f17c04522 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -105,7 +105,8 @@ void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight PWM = %d\n",
+ connector->base.base.id, connector->base.name, val);
panel->backlight.pwm_funcs->set(conn_state, val);
}
@@ -283,7 +284,8 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n",
+ connector->base.base.id, connector->base.name, level);
panel->backlight.funcs->set(conn_state, level);
}
@@ -345,27 +347,24 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
*/
tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "cpu backlight was enabled, disabling\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n",
+ connector->base.base.id, connector->base.name);
intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
}
- tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1);
- intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
- intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0);
- tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1);
- intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0);
}
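The conversions above lean on intel_de_rmw(). As a rough sketch (reg, clear
and set are placeholders), intel_de_rmw(i915, reg, clear, set) behaves like
the open-coded sequence it replaces:

	u32 tmp;

	tmp = intel_de_read(i915, reg);
	tmp &= ~clear;
	tmp |= set;
	intel_de_write(i915, reg, tmp);

so clearing a single bit collapses to:

	intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0);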
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -376,12 +375,10 @@ static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_st
static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev);
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BLC_PWM_CTL2);
- intel_de_write(i915, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0);
}
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -389,12 +386,10 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
+ intel_de_rmw(i915, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0);
}
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -402,19 +397,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ BXT_BLC_PWM_ENABLE, 0);
- if (panel->backlight.controller == 1) {
- val = intel_de_read(i915, UTIL_PIN_CTL);
- val &= ~UTIL_PIN_ENABLE;
- intel_de_write(i915, UTIL_PIN_CTL, val);
- }
+ if (panel->backlight.controller == 1)
+ intel_de_rmw(i915, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0);
}
static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -422,13 +412,11 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ BXT_BLC_PWM_ENABLE, 0);
}
static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
@@ -458,7 +446,8 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
* another client is not activated.
*/
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg_kms(&i915->drm, "Skipping backlight disable on vga switch\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n",
+ connector->base.base.id, connector->base.name);
return;
}
@@ -478,30 +467,24 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pch_ctl1, pch_ctl2, schicken;
+ u32 pch_ctl1, pch_ctl2;
pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "pch backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
}
- if (HAS_PCH_LPT(i915)) {
- schicken = intel_de_read(i915, SOUTH_CHICKEN2);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= LPT_PWM_GRANULARITY;
- else
- schicken &= ~LPT_PWM_GRANULARITY;
- intel_de_write(i915, SOUTH_CHICKEN2, schicken);
- } else {
- schicken = intel_de_read(i915, SOUTH_CHICKEN1);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= SPT_PWM_GRANULARITY;
- else
- schicken &= ~SPT_PWM_GRANULARITY;
- intel_de_write(i915, SOUTH_CHICKEN1, schicken);
- }
+ if (HAS_PCH_LPT(i915))
+ intel_de_rmw(i915, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
+ panel->backlight.alternate_pwm_increment ?
+ LPT_PWM_GRANULARITY : 0);
+ else
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY,
+ panel->backlight.alternate_pwm_increment ?
+ SPT_PWM_GRANULARITY : 0);
pch_ctl2 = panel->backlight.pwm_level_max << 16;
intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
@@ -533,14 +516,16 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "cpu backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
cpu_ctl2 &= ~BLM_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
}
pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "pch backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
}
@@ -578,7 +563,8 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl = intel_de_read(i915, BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
intel_de_write(i915, BLC_PWM_CTL, 0);
}
@@ -618,7 +604,8 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
intel_de_write(i915, BLC_PWM_CTL2, ctl2);
}
@@ -653,7 +640,8 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
ctl2 &= ~BLM_PWM_ENABLE;
intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
@@ -685,7 +673,8 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.controller == 1) {
val = intel_de_read(i915, UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
- drm_dbg_kms(&i915->drm, "util pin already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] utility pin already enabled\n",
+ connector->base.base.id, connector->base.name);
val &= ~UTIL_PIN_ENABLE;
intel_de_write(i915, UTIL_PIN_CTL, val);
}
@@ -699,7 +688,8 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n",
+ connector->base.base.id, connector->base.name);
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
@@ -1270,6 +1260,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1297,6 +1291,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1335,6 +1333,10 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = val != 0;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1364,6 +1366,10 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -1392,6 +1398,10 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control (on pipe %c)\n",
+ connector->base.base.id, connector->base.name, pipe_name(pipe));
+
return 0;
}
@@ -1428,6 +1438,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PWM for backlight control (controller=%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.controller);
+
return 0;
}
@@ -1468,7 +1483,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
*/
panel->backlight.controller = connector->panel.vbt.backlight.controller;
if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) {
- drm_dbg_kms(&i915->drm, "Invalid backlight controller %d, assuming 0\n",
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n",
+ connector->base.base.id, connector->base.name,
panel->backlight.controller);
panel->backlight.controller = 0;
}
@@ -1490,6 +1506,11 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control (controller=%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.controller);
+
return 0;
}
@@ -1511,8 +1532,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
}
if (IS_ERR(panel->backlight.pwm)) {
- drm_err(&i915->drm, "Failed to get the %s PWM chip\n",
- desc);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n",
+ connector->base.base.id, connector->base.name, desc);
panel->backlight.pwm = NULL;
return -ENODEV;
}
@@ -1529,7 +1550,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
level = intel_backlight_invert_pwm_level(connector, level);
panel->backlight.pwm_enabled = true;
- drm_dbg_kms(&i915->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ connector->base.base.id, connector->base.name,
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
get_vbt_pwm_freq(connector), level);
} else {
@@ -1538,8 +1560,10 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
NSEC_PER_SEC / get_vbt_pwm_freq(connector);
}
- drm_info(&i915->drm, "Using %s PWM for LCD backlight control\n",
- desc);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using %s PWM for backlight control\n",
+ connector->base.base.id, connector->base.name, desc);
+
return 0;
}
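The period/frequency conversions in this function are reciprocals in
nanoseconds. A worked example, assuming a VBT PWM frequency of 200 Hz:

	/* period = NSEC_PER_SEC / freq = 1000000000 / 200 = 5000000 ns */
	panel->backlight.pwm_state.period = NSEC_PER_SEC / 200;

	/* the debug print above recovers freq = NSEC_PER_SEC / period = 200 Hz */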
@@ -1582,8 +1606,9 @@ static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_s
static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct intel_panel *panel = &connector->panel;
- int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ int ret;
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0)
return ret;
@@ -1623,10 +1648,12 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
if (!connector->panel.vbt.backlight.present) {
if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) {
drm_dbg_kms(&i915->drm,
- "no backlight present per VBT, but present per quirk\n");
+ "[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n",
+ connector->base.base.id, connector->base.name);
} else {
drm_dbg_kms(&i915->drm,
- "no backlight present per VBT\n");
+ "[CONNECTOR:%d:%s] no backlight present per VBT\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
}
@@ -1642,16 +1669,16 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
if (ret) {
drm_dbg_kms(&i915->drm,
- "failed to setup backlight for connector %s\n",
- connector->base.name);
+ "[CONNECTOR:%d:%s] failed to setup backlight\n",
+ connector->base.base.id, connector->base.name);
return ret;
}
panel->backlight.present = true;
drm_dbg_kms(&i915->drm,
- "Connector %s backlight initialized, %s, brightness %u/%u\n",
- connector->base.name,
+ "[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n",
+ connector->base.base.id, connector->base.name,
str_enabled_disabled(panel->backlight.enabled),
panel->backlight.level, panel->backlight.max);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 04b846440de6..75e69dffc5e9 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -141,8 +141,8 @@ struct bdb_block_entry {
};
static const void *
-find_section(struct drm_i915_private *i915,
- enum bdb_block_id section_id)
+bdb_find_section(struct drm_i915_private *i915,
+ enum bdb_block_id section_id)
{
struct bdb_block_entry *entry;
@@ -201,7 +201,7 @@ static size_t lfp_data_min_size(struct drm_i915_private *i915)
const struct bdb_lvds_lfp_data_ptrs *ptrs;
size_t size;
- ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return 0;
@@ -630,7 +630,7 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
{
const struct bdb_lvds_options *lvds_options;
- lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
+ lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return -1;
@@ -671,11 +671,11 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
dump_pnp_id(i915, edid_id, "EDID");
- ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return -1;
- data = find_section(i915, BDB_LVDS_LFP_DATA);
+ data = bdb_find_section(i915, BDB_LVDS_LFP_DATA);
if (!data)
return -1;
@@ -791,7 +791,7 @@ parse_panel_options(struct drm_i915_private *i915,
int panel_type = panel->vbt.panel_type;
int drrs_mode;
- lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
+ lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
@@ -881,11 +881,11 @@ parse_lfp_data(struct drm_i915_private *i915,
const struct lvds_pnp_id *pnp_id;
int panel_type = panel->vbt.panel_type;
- ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return;
- data = find_section(i915, BDB_LVDS_LFP_DATA);
+ data = bdb_find_section(i915, BDB_LVDS_LFP_DATA);
if (!data)
return;
@@ -932,7 +932,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
if (i915->display.vbt.version < 229)
return;
- generic_dtd = find_section(i915, BDB_GENERIC_DTD);
+ generic_dtd = bdb_find_section(i915, BDB_GENERIC_DTD);
if (!generic_dtd)
return;
@@ -1011,7 +1011,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
int panel_type = panel->vbt.panel_type;
u16 level;
- backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT);
+ backlight_data = bdb_find_section(i915, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
return;
@@ -1084,6 +1084,12 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.min_brightness = entry->min_brightness;
}
+ if (i915->display.vbt.version >= 239)
+ panel->vbt.backlight.hdr_dpcd_refresh_timeout =
+ DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100);
+ else
+ panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30;
+
drm_dbg_kms(&i915->drm,
"VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u, controller %u\n",
@@ -1113,14 +1119,14 @@ parse_sdvo_panel_data(struct drm_i915_private *i915,
if (index == -1) {
const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
- sdvo_lvds_options = find_section(i915, BDB_SDVO_LVDS_OPTIONS);
+ sdvo_lvds_options = bdb_find_section(i915, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = sdvo_lvds_options->panel_type;
}
- dtds = find_section(i915, BDB_SDVO_PANEL_DTDS);
+ dtds = bdb_find_section(i915, BDB_SDVO_PANEL_DTDS);
if (!dtds)
return;
@@ -1156,7 +1162,7 @@ parse_general_features(struct drm_i915_private *i915)
{
const struct bdb_general_features *general;
- general = find_section(i915, BDB_GENERAL_FEATURES);
+ general = bdb_find_section(i915, BDB_GENERAL_FEATURES);
if (!general)
return;
@@ -1202,9 +1208,7 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i)
static void
parse_sdvo_device_mapping(struct drm_i915_private *i915)
{
- struct sdvo_device_mapping *mapping;
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
int count = 0;
/*
@@ -1217,7 +1221,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
}
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
+ struct sdvo_device_mapping *mapping;
if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) {
@@ -1280,7 +1285,7 @@ parse_driver_features(struct drm_i915_private *i915)
{
const struct bdb_driver_features *driver;
- driver = find_section(i915, BDB_DRIVER_FEATURES);
+ driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
if (!driver)
return;
@@ -1317,7 +1322,7 @@ parse_panel_driver_features(struct drm_i915_private *i915,
{
const struct bdb_driver_features *driver;
- driver = find_section(i915, BDB_DRIVER_FEATURES);
+ driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
if (!driver)
return;
@@ -1357,7 +1362,7 @@ parse_power_conservation_features(struct drm_i915_private *i915,
if (i915->display.vbt.version < 228)
return;
- power = find_section(i915, BDB_LFP_POWER);
+ power = bdb_find_section(i915, BDB_LFP_POWER);
if (!power)
return;
@@ -1397,7 +1402,7 @@ parse_edp(struct drm_i915_private *i915,
const struct edp_fast_link_params *edp_link_params;
int panel_type = panel->vbt.panel_type;
- edp = find_section(i915, BDB_EDP);
+ edp = bdb_find_section(i915, BDB_EDP);
if (!edp)
return;
@@ -1527,7 +1532,7 @@ parse_psr(struct drm_i915_private *i915,
const struct psr_table *psr_table;
int panel_type = panel->vbt.panel_type;
- psr = find_section(i915, BDB_PSR);
+ psr = bdb_find_section(i915, BDB_PSR);
if (!psr) {
drm_dbg_kms(&i915->drm, "No PSR BDB found.\n");
return;
@@ -1688,7 +1693,7 @@ parse_mipi_config(struct drm_i915_private *i915,
/* Parse #52 for panel index used from panel_type already
* parsed
*/
- start = find_section(i915, BDB_MIPI_CONFIG);
+ start = bdb_find_section(i915, BDB_MIPI_CONFIG);
if (!start) {
drm_dbg_kms(&i915->drm, "No MIPI config BDB found");
return;
@@ -2000,7 +2005,7 @@ parse_mipi_sequence(struct drm_i915_private *i915,
if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
- sequence = find_section(i915, BDB_MIPI_SEQUENCE);
+ sequence = bdb_find_section(i915, BDB_MIPI_SEQUENCE);
if (!sequence) {
drm_dbg_kms(&i915->drm,
"No MIPI Sequence found, parsing complete\n");
@@ -2075,14 +2080,13 @@ parse_compression_parameters(struct drm_i915_private *i915)
{
const struct bdb_compression_parameters *params;
struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
u16 block_size;
int index;
if (i915->display.vbt.version < 198)
return;
- params = find_section(i915, BDB_COMPRESSION_PARAMETERS);
+ params = bdb_find_section(i915, BDB_COMPRESSION_PARAMETERS);
if (params) {
/* Sanity checks */
if (params->entry_size != sizeof(params->data[0])) {
@@ -2100,7 +2104,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
if (!child->compression_enable)
continue;
@@ -2226,14 +2230,14 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
{
- const struct intel_bios_encoder_data *devdata;
enum port port;
if (!ddc_pin)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->display.vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata =
+ i915->display.vbt.ports[port];
if (devdata && ddc_pin == devdata->child.ddc_pin)
return port;
@@ -2292,14 +2296,14 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
{
- const struct intel_bios_encoder_data *devdata;
enum port port;
if (!aux_ch)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->display.vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata =
+ i915->display.vbt.ports[port];
if (devdata && aux_ch == devdata->child.aux_channel)
return port;
@@ -2522,7 +2526,7 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
}
}
-static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 216)
return 0;
@@ -2533,7 +2537,7 @@ static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *de
return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
}
-static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 244)
return 0;
@@ -2587,7 +2591,7 @@ intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata)
return devdata->child.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
}
-static bool
+bool
intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
{
return intel_bios_encoder_supports_dp(devdata) &&
@@ -2600,7 +2604,14 @@ intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
}
-static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
+bool
+intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon;
+}
+
+/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
+int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 158)
return -1;
@@ -2608,7 +2619,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de
return devdata->child.hdmi_level_shifter_value;
}
-static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
+int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 204)
return 0;
@@ -2666,37 +2677,37 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
drm_dbg_kms(&i915->drm,
"Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
- HAS_LSPCON(i915) && child->lspcon,
+ intel_bios_encoder_is_lspcon(devdata),
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
- hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata);
+ hdmi_level_shift = intel_bios_hdmi_level_shift(devdata);
if (hdmi_level_shift >= 0) {
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI level shift: %d\n",
port_name(port), hdmi_level_shift);
}
- max_tmds_clock = _intel_bios_max_tmds_clock(devdata);
+ max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata);
if (max_tmds_clock)
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI max TMDS clock: %d kHz\n",
port_name(port), max_tmds_clock);
/* I_boost config for SKL and above */
- dp_boost_level = intel_bios_encoder_dp_boost_level(devdata);
+ dp_boost_level = intel_bios_dp_boost_level(devdata);
if (dp_boost_level)
drm_dbg_kms(&i915->drm,
"Port %c VBT (e)DP boost level: %d\n",
port_name(port), dp_boost_level);
- hdmi_boost_level = intel_bios_encoder_hdmi_boost_level(devdata);
+ hdmi_boost_level = intel_bios_hdmi_boost_level(devdata);
if (hdmi_boost_level)
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI boost level: %d\n",
port_name(port), hdmi_boost_level);
- dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata);
+ dp_max_link_rate = intel_bios_dp_max_link_rate(devdata);
if (dp_max_link_rate)
drm_dbg_kms(&i915->drm,
"Port %c VBT DP max link rate: %d\n",
@@ -2781,7 +2792,7 @@ parse_general_definitions(struct drm_i915_private *i915)
u16 block_size;
int bus_pin;
- defs = find_section(i915, BDB_GENERAL_DEFINITIONS);
+ defs = bdb_find_section(i915, BDB_GENERAL_DEFINITIONS);
if (!defs) {
drm_dbg_kms(&i915->drm,
"No general definition block is found, no devices defined.\n");
@@ -2811,7 +2822,7 @@ parse_general_definitions(struct drm_i915_private *i915)
expected_size = 37;
} else if (i915->display.vbt.version <= 215) {
expected_size = 38;
- } else if (i915->display.vbt.version <= 237) {
+ } else if (i915->display.vbt.version <= 250) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
@@ -3306,7 +3317,6 @@ void intel_bios_fini_panel(struct intel_panel *panel)
bool intel_bios_is_tv_present(struct drm_i915_private *i915)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
if (!i915->display.vbt.int_tv_support)
return false;
@@ -3315,7 +3325,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
return true;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
/*
* If the device type is not TV, continue.
@@ -3349,13 +3359,12 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
if (list_empty(&i915->display.vbt.display_devices))
return true;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -3397,25 +3406,22 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
{
+ const struct intel_bios_encoder_data *devdata;
+
if (WARN_ON(!has_ddi_port_info(i915)))
return true;
- return i915->display.vbt.ports[port];
-}
+ if (!is_port_valid(i915, port))
+ return false;
-/**
- * intel_bios_is_port_edp - is the device in given port eDP
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if the device in %port is eDP.
- */
-bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port)
-{
- const struct intel_bios_encoder_data *devdata =
- intel_bios_encoder_data_lookup(i915, port);
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ const struct child_device_config *child = &devdata->child;
+
+ if (dvo_port_to_port(i915, child->dvo_port) == port)
+ return true;
+ }
- return devdata && intel_bios_encoder_supports_edp(devdata);
+ return false;
}
static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata)
@@ -3457,17 +3463,14 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
enum port *port)
{
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
- u8 dvo_port;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
+ u8 dvo_port = child->dvo_port;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- dvo_port = child->dvo_port;
-
if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
drm_dbg_kms(&i915->drm,
"VBT has unsupported DSI port %c\n",
@@ -3554,10 +3557,9 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
- child = &devdata->child;
+ const struct child_device_config *child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
@@ -3576,73 +3578,10 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
return false;
}
-/**
- * intel_bios_is_port_hpd_inverted - is HPD inverted for %port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if HPD should be inverted for %port.
- */
-bool
-intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
- enum port port)
+static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel)
{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- if (drm_WARN_ON_ONCE(&i915->drm,
- !IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
- return false;
-
- return devdata && devdata->child.hpd_invert;
-}
-
-/**
- * intel_bios_is_lspcon_present - if LSPCON is attached on %port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if LSPCON is present on this port
- */
-bool
-intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
-}
-
-/**
- * intel_bios_is_lane_reversal_needed - if lane reversal needed on port
- * @i915: i915 device instance
- * @port: port to check
- *
- * Return true if port requires lane reversal
- */
-bool
-intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
-
- return devdata && devdata->child.lane_reversal;
-}
-
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
- enum port port)
-{
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
enum aux_ch aux_ch;
- if (!devdata || !devdata->child.aux_channel) {
- aux_ch = (enum aux_ch)port;
-
- drm_dbg_kms(&i915->drm,
- "using AUX %c for port %c (platform default)\n",
- aux_ch_name(aux_ch), port_name(port));
- return aux_ch;
- }
-
/*
* RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
* map to DDI A,B,TC1,TC2 respectively.
@@ -3650,7 +3589,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
* ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E
* map to DDI A,TC1,TC2,TC3,TC4 respectively.
*/
- switch (devdata->child.aux_channel) {
+ switch (aux_channel) {
case DP_AUX_A:
aux_ch = AUX_CH_A;
break;
@@ -3711,35 +3650,23 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_I;
break;
default:
- MISSING_CASE(devdata->child.aux_channel);
+ MISSING_CASE(aux_channel);
aux_ch = AUX_CH_A;
break;
}
- drm_dbg_kms(&i915->drm, "using AUX %c for port %c (VBT)\n",
- aux_ch_name(aux_ch), port_name(port));
-
return aux_ch;
}
-int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
+enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+ if (!devdata || !devdata->child.aux_channel)
+ return AUX_CH_NONE;
- return _intel_bios_max_tmds_clock(devdata);
+ return map_aux_ch(devdata->i915, devdata->child.aux_channel);
}
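intel_bios_dp_aux_ch() now returns AUX_CH_NONE instead of applying the
platform default itself. A hedged sketch of how a caller can keep the
fallback that the removed intel_bios_port_aux_ch() used to provide:

	enum aux_ch aux_ch = intel_bios_dp_aux_ch(devdata);

	if (aux_ch == AUX_CH_NONE)
		aux_ch = (enum aux_ch)port;	/* platform default, as before */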
-/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
-int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_hdmi_level_shift(devdata);
-}
-
-int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
+int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
@@ -3747,7 +3674,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd
return translate_iboost(devdata->child.dp_iboost_level);
}
-int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
+int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
@@ -3755,31 +3682,12 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de
return translate_iboost(devdata->child.hdmi_iboost_level);
}
-int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_dp_max_link_rate(devdata);
-}
-
-int intel_bios_dp_max_lane_count(struct intel_encoder *encoder)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
- return _intel_bios_dp_max_lane_count(devdata);
-}
-
-int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
+int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
-
if (!devdata || !devdata->child.ddc_pin)
return 0;
- return map_ddc_pin(i915, devdata->child.ddc_pin);
+ return map_ddc_pin(devdata->i915, devdata->child.ddc_pin);
}
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
@@ -3792,6 +3700,16 @@ bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devda
return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt;
}
+bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && devdata->child.lane_reversal;
+}
+
+bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata && devdata->child.hpd_invert;
+}
+
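These devdata-based predicates replace the old port-based queries. An
illustrative (hypothetical) call site using the lookup-then-query pattern:

	const struct intel_bios_encoder_data *devdata =
		intel_bios_encoder_data_lookup(i915, port);

	/* was: intel_bios_is_lspcon_present(i915, port) */
	bool has_lspcon = intel_bios_encoder_is_lspcon(devdata);

	/* was: intel_bios_is_lane_reversal_needed(i915, port) */
	bool lane_reversal = intel_bios_encoder_lane_reversal(devdata);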
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index d221f784aa88..8a0730c9b48c 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -38,6 +38,7 @@ struct intel_bios_encoder_data;
struct intel_crtc_state;
struct intel_encoder;
struct intel_panel;
+enum aux_ch;
enum port;
enum intel_backlight_type {
@@ -248,21 +249,9 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
-bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
- enum port port);
-bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
- enum port port);
-bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
- enum port port);
-enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
-int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
-int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
-int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
-int intel_bios_dp_max_lane_count(struct intel_encoder *encoder);
-int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
@@ -272,9 +261,19 @@ intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata);
-int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata);
-int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata);
+enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
+int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
+int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata);
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 1c236f02b380..202321ffbe2a 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -119,6 +119,32 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
+static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+{
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ u16 qgv_points = 0, psf_points = 0;
+
+ /*
+ * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
+ * the request if we try to mask any unadvertised points.
+ * So we must operate only on the points returned by PCode.
+ */
+ if (num_qgv_points > 0)
+ qgv_points = GENMASK(num_qgv_points - 1, 0);
+
+ if (num_psf_gv_points > 0)
+ psf_points = GENMASK(num_psf_gv_points - 1, 0);
+
+ return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
+}
+
+static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+{
+ return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+ ICL_PCODE_REQ_QGV_PT_MASK);
+}
+
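A worked example of the mask construction, assuming PCode advertised 3 QGV
points and 2 PSF points:

	/*
	 * qgv_points = GENMASK(2, 0) = 0x7, psf_points = GENMASK(1, 0) = 0x3,
	 * combined as ICL_PCODE_REQ_QGV_PT(0x7) | ADLS_PCODE_REQ_PSF_PT(0x3).
	 *
	 * is_sagv_enabled() then treats SAGV as disabled only when points_mask
	 * leaves exactly one advertised QGV point unmasked, i.e. when the
	 * remaining QGV set is a power of 2.
	 */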
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
u32 points_mask)
{
@@ -136,6 +162,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
return ret;
}
+ dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+ I915_SAGV_ENABLED : I915_SAGV_DISABLED;
+
return 0;
}
@@ -965,26 +994,6 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
return 0;
}
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
-{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
- u16 qgv_points = 0, psf_points = 0;
-
- /*
- * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
- * it with failure if we try masking any unadvertised points.
- * So need to operate only with those returned from PCode.
- */
- if (num_qgv_points > 0)
- qgv_points = GENMASK(num_qgv_points - 1, 0);
-
- if (num_psf_gv_points > 0)
- psf_points = GENMASK(num_psf_gv_points - 1, 0);
-
- return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
-}
-
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 7e16b655c833..084a483f9776 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1329,6 +1329,30 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
{}
};
+static const struct intel_cdclk_vals rplu_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
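Each row satisfies cdclk = refclk * ratio / divider (all in kHz).
Spot-checking two entries of the new RPL-U table:

	/* 19200 * 27 / 3 == 172800 */
	/* 38400 * 34 / 2 == 652800 */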
static const struct intel_cdclk_vals dg2_cdclk_table[] = {
{ .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 },
{ .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 },
@@ -1801,6 +1825,13 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
return true;
}
+static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv)
+{
+ return ((IS_DG2(dev_priv) || IS_METEORLAKE(dev_priv)) &&
+ dev_priv->display.cdclk.hw.vco > 0 &&
+ HAS_CDCLK_SQUASH(dev_priv));
+}
+
static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
@@ -1815,9 +1846,13 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
if (dev_priv->display.cdclk.hw.vco != vco)
adlp_cdclk_pll_crawl(dev_priv, vco);
- } else if (DISPLAY_VER(dev_priv) >= 11)
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ /* wa_15010685871: dg2, mtl */
+ if (pll_enable_wa_needed(dev_priv))
+ dg2_cdclk_squash_program(dev_priv, 0);
+
icl_cdclk_pll_update(dev_priv, vco);
- else
+ } else
bxt_cdclk_pll_update(dev_priv, vco);
waveform = cdclk_squash_waveform(dev_priv, cdclk);
@@ -3353,6 +3388,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
/* Wa_22011320316:adl-p[a0] */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
+ else if (IS_ADLP_RPLU(dev_priv))
+ dev_priv->display.cdclk.table = rplu_cdclk_table;
else
dev_priv->display.cdclk.table = adlp_cdclk_table;
} else if (IS_ROCKETLAKE(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 8d97c299e657..36aac88143ac 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -47,6 +47,11 @@ struct intel_color_funcs {
*/
void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
/*
+ * Perform any extra tasks needed after all the
+ * double buffered registers have been latched.
+ */
+ void (*color_post_update)(const struct intel_crtc_state *crtc_state);
+ /*
* Load LUTs (and other single buffered color management
* registers). Will (hopefully) be called during the vblank
* following the latching of any double buffered registers
@@ -257,7 +262,7 @@ static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(i915) >= 11)
return false;
- /* pre-hsw have PIPECONF_COLOR_RANGE_SELECT */
+ /* pre-hsw platforms have TRANSCONF_COLOR_RANGE_SELECT */
if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915))
return false;
@@ -614,9 +619,33 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
+ /*
+ * Despite Wa_1406463849, ICL no longer suffers from the SKL
+ * DC5/PSR CSC black screen issue (see skl_color_commit_noarm()).
+ * Possibly due to the extra sticky CSC arming
+ * (see icl_color_post_update()).
+ *
+ * On TGL+ all CSC arming issues have been properly fixed.
+ */
icl_load_csc_matrix(crtc_state);
}
+static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * Possibly related to display WA #1184, SKL CSC loses the latched
+ * CSC coeff/offset register values if the CSC registers are disarmed
+ * between DC5 exit and PSR exit. This will cause the plane(s) to
+ * output all black (until CSC_MODE is rearmed and properly latched).
+ * Once PSR exit (and proper register latching) has occurred the
+ * danger is over. Thus when PSR is enabled the CSC coeff/offset
+ * register programming will be performed from skl_color_commit_arm()
+ * which is called after PSR exit.
+ */
+ if (!crtc_state->has_psr)
+ ilk_load_csc_matrix(crtc_state);
+}
+
static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
ilk_load_csc_matrix(crtc_state);
@@ -624,7 +653,7 @@ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
- /* update PIPECONF GAMMA_MODE */
+ /* update TRANSCONF GAMMA_MODE */
i9xx_set_pipeconf(crtc_state);
}
@@ -633,7 +662,7 @@ static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- /* update PIPECONF GAMMA_MODE */
+ /* update TRANSCONF GAMMA_MODE */
ilk_set_pipeconf(crtc_state);
intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
@@ -659,6 +688,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val = 0;
+ if (crtc_state->has_psr)
+ ilk_load_csc_matrix(crtc_state);
+
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black, but apply pipe gamma and CSC appropriately
@@ -677,6 +709,47 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
crtc_state->csc_mode);
}
+static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * We don't (yet) allow userspace to control the pipe background color,
+ * so force it to black.
+ */
+ intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
+
+ intel_de_write(i915, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
+
+ intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
+}
+
+static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /*
+ * Despite Wa_1406463849, ICL CSC is no longer disarmed by
+ * coeff/offset register *writes*. Instead, once CSC_MODE
+ * is armed it stays armed, even after it has been latched.
+ * Afterwards the coeff/offset registers become effectively
+ * self-arming. That self-arming must be disabled before the
+ * next icl_color_commit_noarm() tries to write the next set
+ * of coeff/offset registers. Fortunately register *reads*
+ * do still disarm the CSC. Naturally this must not be done
+ * until the previously written CSC registers have actually
+ * been latched.
+ *
+ * TGL+ no longer need this workaround.
+ */
+ intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe));
+}
+
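Putting the three hooks together, the per-crtc commit flow on ICL is
roughly the following (a sketch based on the comments above, not the
literal caller):

	intel_color_commit_noarm(crtc_state);	/* before vblank evasion */
	/* ... vblank-evade critical section ... */
	intel_color_commit_arm(crtc_state);	/* latched at the next vblank */
	/* ... once the registers have actually been latched ... */
	intel_color_post_update(crtc_state);	/* ICL: CSC-disarming read */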
static struct drm_property_blob *
create_linear_lut(struct drm_i915_private *i915, int lut_size)
{
@@ -1256,8 +1329,11 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
break;
}
- if (crtc_state->dsb)
- intel_dsb_commit(crtc_state->dsb);
+ if (crtc_state->dsb) {
+ intel_dsb_finish(crtc_state->dsb);
+ intel_dsb_commit(crtc_state->dsb, false);
+ intel_dsb_wait(crtc_state->dsb);
+ }
}
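The DSB usage is now split into explicit finish/commit/wait steps. A hedged
sketch of the lifecycle implied by this hunk and by the intel_dsb_prepare()
call further down (the meaning of the second intel_dsb_commit() argument is
inferred from this call site only):

	crtc_state->dsb = intel_dsb_prepare(crtc, 1024);
	/* ... queue the LUT register writes into the DSB ... */
	intel_dsb_finish(crtc_state->dsb);		/* seal the command buffer */
	intel_dsb_commit(crtc_state->dsb, false);	/* kick off execution */
	intel_dsb_wait(crtc_state->dsb);		/* block until it completes */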
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
@@ -1373,6 +1449,14 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
i915->display.funcs.color->color_commit_arm(crtc_state);
}
+void intel_color_post_update(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (i915->display.funcs.color->color_post_update)
+ i915->display.funcs.color->color_post_update(crtc_state);
+}
+
void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1380,6 +1464,9 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
/* FIXME DSB has issues loading LUTs, disable it for now */
return;
+ if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
+ return;
+
crtc_state->dsb = intel_dsb_prepare(crtc, 1024);
}
@@ -1500,6 +1587,8 @@ intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane->id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
/* plane control register changes blocked by CxSR */
if (HAS_GMCH(i915))
@@ -3064,10 +3153,20 @@ static const struct intel_color_funcs i9xx_color_funcs = {
.lut_equal = i9xx_lut_equal,
};
+static const struct intel_color_funcs tgl_color_funcs = {
+ .color_check = icl_color_check,
+ .color_commit_noarm = icl_color_commit_noarm,
+ .color_commit_arm = icl_color_commit_arm,
+ .load_luts = icl_load_luts,
+ .read_luts = icl_read_luts,
+ .lut_equal = icl_lut_equal,
+};
+
static const struct intel_color_funcs icl_color_funcs = {
.color_check = icl_color_check,
.color_commit_noarm = icl_color_commit_noarm,
- .color_commit_arm = skl_color_commit_arm,
+ .color_commit_arm = icl_color_commit_arm,
+ .color_post_update = icl_color_post_update,
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
.lut_equal = icl_lut_equal,
@@ -3075,7 +3174,7 @@ static const struct intel_color_funcs icl_color_funcs = {
static const struct intel_color_funcs glk_color_funcs = {
.color_check = glk_color_check,
- .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = glk_load_luts,
.read_luts = glk_read_luts,
@@ -3084,7 +3183,7 @@ static const struct intel_color_funcs glk_color_funcs = {
static const struct intel_color_funcs skl_color_funcs = {
.color_check = ivb_color_check,
- .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = bdw_load_luts,
.read_luts = bdw_read_luts,
@@ -3180,7 +3279,9 @@ void intel_color_init_hooks(struct drm_i915_private *i915)
else
i915->display.funcs.color = &i9xx_color_funcs;
} else {
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(i915) >= 12)
+ i915->display.funcs.color = &tgl_color_funcs;
+ else if (DISPLAY_VER(i915) == 11)
i915->display.funcs.color = &icl_color_funcs;
else if (DISPLAY_VER(i915) == 10)
i915->display.funcs.color = &glk_color_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index d620b5b1e2a6..8002492be709 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -21,6 +21,7 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state);
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
+void intel_color_post_update(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 8b870b2dd4f9..922a6d87b553 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -78,14 +78,11 @@ static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
enum phy phy)
{
const struct icl_procmon *procmon;
- u32 val;
procmon = icl_get_procmon_ref_values(dev_priv, phy);
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy));
- val &= ~((0xff << 16) | 0xff);
- val |= procmon->dw1;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW1(phy),
+ (0xff << 16) | 0xff, procmon->dw1);
intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
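These conversions all follow the same pattern: intel_de_rmw(i915, reg, clear, set) replaces an open-coded read-modify-write, so the two forms below should behave identically:

	/* before: open-coded read/modify/write */
	val = intel_de_read(dev_priv, reg);
	val &= ~clear;
	val |= set;
	intel_de_write(dev_priv, reg, val);

	/* after: one helper call taking (clear, set) masks */
	intel_de_rmw(dev_priv, reg, clear, set);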
@@ -236,8 +233,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy),
- DCC_MODE_SELECT_MASK,
- DCC_MODE_SELECT_CONTINUOSLY);
+ DCC_MODE_SELECT_MASK, RUN_DCC_ONCE);
}
ret &= icl_verify_procmon_ref_values(dev_priv, phy);
@@ -267,7 +263,6 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
int lane_count, bool lane_reversal)
{
u8 lane_mask;
- u32 val;
if (is_dsi) {
drm_WARN_ON(&dev_priv->drm, lane_reversal);
@@ -308,10 +303,8 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
- val &= ~PWR_DOWN_LN_MASK;
- val |= lane_mask;
- intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy),
+ PWR_DOWN_LN_MASK, lane_mask);
}
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
@@ -360,25 +353,19 @@ skip_phy_misc:
val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
val &= ~DCC_MODE_SELECT_MASK;
- val |= DCC_MODE_SELECT_CONTINUOSLY;
+ val |= RUN_DCC_ONCE;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
}
icl_set_procmon_ref_values(dev_priv, phy);
- if (phy_is_master(dev_priv, phy)) {
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
- val |= IREFGEN;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
- }
-
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
- val |= COMP_INIT;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
+ if (phy_is_master(dev_priv, phy))
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW8(phy),
+ 0, IREFGEN);
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
- val |= CL_POWER_DOWN_ENABLE;
- intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ 0, CL_POWER_DOWN_ENABLE);
}
}
@@ -387,8 +374,6 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
enum phy phy;
for_each_combo_phy_reverse(dev_priv, phy) {
- u32 val;
-
if (phy == PHY_A &&
!icl_combo_phy_verify_state(dev_priv, phy)) {
if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
@@ -410,14 +395,11 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
- val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
- val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
+ intel_de_rmw(dev_priv, ICL_PHY_MISC(phy), 0,
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN);
skip_phy_misc:
- val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
- val &= ~COMP_INIT;
- intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
+ intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index 2ed65193ca19..b0983edccf3f 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -90,8 +90,8 @@
#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
-#define DCC_MODE_SELECT_MASK (0x3 << 20)
-#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20)
+#define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20)
+#define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0)
#define COMMON_KEEPER_EN (1 << 26)
#define LATENCY_OPTIM_MASK (0x3 << 2)
#define LATENCY_OPTIM_VAL(x) ((x) << 2)
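For reference, REG_GENMASK(21, 20) covers the same bits as the old (0x3 << 20), and REG_FIELD_PREP(mask, 0) evaluates to 0, so the functional change here is the DCC mode going from 0x3 (continuous) to 0x0 (run once). A small sketch of the macro semantics:

	/* DCC_MODE_SELECT_MASK == REG_GENMASK(21, 20) == 0x00300000 */
	u32 mode = REG_FIELD_GET(DCC_MODE_SELECT_MASK, val);	/* bits 21:20 */

	val &= ~DCC_MODE_SELECT_MASK;
	val |= RUN_DCC_ONCE;	/* REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0) == 0 */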
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 7267ffc7f539..8f2ebead0826 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -260,7 +260,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
ilk_pfit_disable(old_crtc_state);
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
@@ -300,7 +300,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
hsw_fdi_link_train(encoder, crtc_state);
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
}
static void hsw_enable_crt(struct intel_atomic_state *state,
@@ -678,10 +678,11 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
+intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
u32 save_bclrpat;
u32 save_vtotal;
u32 vtotal, vactive;
@@ -693,25 +694,25 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
- save_bclrpat = intel_de_read(dev_priv, BCLRPAT(pipe));
- save_vtotal = intel_de_read(dev_priv, VTOTAL(pipe));
- vblank = intel_de_read(dev_priv, VBLANK(pipe));
+ save_bclrpat = intel_de_read(dev_priv, BCLRPAT(cpu_transcoder));
+ save_vtotal = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
+ vblank = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
- vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
- vactive = (save_vtotal & 0x7ff) + 1;
+ vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1;
+ vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1;
- vblank_start = (vblank & 0xfff) + 1;
- vblank_end = ((vblank >> 16) & 0xfff) + 1;
+ vblank_start = REG_FIELD_GET(VBLANK_START_MASK, vblank) + 1;
+ vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1;
/* Set the border color to purple. */
- intel_de_write(dev_priv, BCLRPAT(pipe), 0x500050);
+ intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), 0x500050);
if (DISPLAY_VER(dev_priv) != 2) {
- u32 pipeconf = intel_de_read(dev_priv, PIPECONF(pipe));
+ u32 transconf = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
- intel_de_write(dev_priv, PIPECONF(pipe),
- pipeconf | PIPECONF_FORCE_BORDER);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
+ transconf | TRANSCONF_FORCE_BORDER);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
/* Wait for next Vblank to substitute
* border color for Color info */
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
@@ -720,7 +721,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
connector_status_connected :
connector_status_disconnected;
- intel_de_write(dev_priv, PIPECONF(pipe), pipeconf);
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), transconf);
} else {
bool restore_vblank = false;
int count, detect;
@@ -730,12 +731,13 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- u32 vsync = intel_de_read(dev_priv, VSYNC(pipe));
- u32 vsync_start = (vsync & 0xffff) + 1;
+ u32 vsync = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
+ u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1;
vblank_start = vsync_start;
- intel_de_write(dev_priv, VBLANK(pipe),
- (vblank_start - 1) | ((vblank_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(vblank_start - 1) |
+ VBLANK_END(vblank_end - 1));
restore_vblank = true;
}
/* sample in the vertical border, selecting the larger one */
@@ -766,7 +768,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
/* restore vblank if necessary */
if (restore_vblank)
- intel_de_write(dev_priv, VBLANK(pipe), vblank);
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), vblank);
/*
* If more than 3/4 of the scanline detected a monitor,
* then it is assumed to be present. This works even on i830,
@@ -779,7 +781,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
}
/* Restore previous settings */
- intel_de_write(dev_priv, BCLRPAT(pipe), save_bclrpat);
+ intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), save_bclrpat);
return status;
}
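For context, the final verdict a few lines above this hunk compares the hit count against 3/4 of the samples; an assumed sketch of that check, using the detect/count variables visible in the diff:

	/* assumed shape of the 3/4-of-scanline check described above */
	if (detect * 4 > count * 3)
		status = connector_status_connected;
	else
		status = connector_status_disconnected;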
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 82be0fbe9934..ed45a6934854 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -25,6 +25,7 @@
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_dsi.h"
+#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -211,7 +212,7 @@ static void intel_crtc_destroy(struct drm_crtc *_crtc)
static int intel_crtc_late_register(struct drm_crtc *crtc)
{
- intel_crtc_debugfs_add(crtc);
+ intel_crtc_debugfs_add(to_intel_crtc(crtc));
return 0;
}
@@ -314,6 +315,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
}
crtc->plane_ids_mask |= BIT(primary->id);
+ intel_init_fifo_underrun_reporting(dev_priv, crtc, false);
+
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
@@ -683,6 +686,14 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
*/
intel_vrr_send_push(new_crtc_state);
+ /*
+ * Seamless M/N update may need to update frame timings.
+ *
+ * FIXME Should be synchronized with the start of vblank somehow...
+ */
+ if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
+ intel_crtc_update_active_timings(new_crtc_state);
+
local_irq_enable();
if (intel_vgpu_active(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 2422d6ef5777..766633566fd6 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -14,14 +14,16 @@
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
const struct drm_display_mode *mode)
{
- drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
+ drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, "
+ "hd=%d hb=%d-%d hs=%d-%d ht=%d, "
+ "vd=%d vb=%d-%d vs=%d-%d vt=%d, "
+ "flags=0x%x\n",
mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal,
- mode->type, mode->flags);
+ mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end,
+ mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end,
+ mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->flags);
}
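With the new format, a typical 1080p60 CEA mode would log something roughly like this (values assumed, not taken from this patch):

	crtc timings: clock=148500, hd=1920 hb=1920-2200 hs=2008-2052 ht=2200, vd=1080 vb=1080-1125 vs=1084-1089 vt=1125, flags=0x5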
static void
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index d190fa0d393b..31bef0427377 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -21,7 +21,6 @@
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
-#include "intel_sprite.h"
#include "skl_watermark.h"
/* Cursor formats */
@@ -532,9 +531,10 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
skl_write_cursor_wm(plane, crtc_state);
if (plane_state)
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+ intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state,
+ plane_state);
else
- intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
+ intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 254559abedfb..73240cf78c8b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -47,6 +47,7 @@
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
+#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
@@ -64,9 +65,9 @@
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_snps_phy.h"
-#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -89,7 +90,7 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
{
int level;
- level = intel_bios_hdmi_level_shift(encoder);
+ level = intel_bios_hdmi_level_shift(encoder->devdata);
if (level < 0)
level = trans->hdmi_default_entry;
@@ -126,7 +127,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
/* If we're boosting the current, set bit 31 of trans1 */
if (has_iboost(dev_priv) &&
- intel_bios_encoder_dp_boost_level(encoder->devdata))
+ intel_bios_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
@@ -158,7 +159,7 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
/* If we're boosting the current, set bit 31 of trans1 */
if (has_iboost(dev_priv) &&
- intel_bios_encoder_hdmi_boost_level(encoder->devdata))
+ intel_bios_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
@@ -644,19 +645,14 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
int ret = 0;
- u32 tmp;
wakeref = intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain);
if (drm_WARN_ON(dev, !wakeref))
return -ENXIO;
- tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (enable)
- tmp |= hdcp_mask;
- else
- tmp &= ~hdcp_mask;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ hdcp_mask, enable ? hdcp_mask : 0);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}
@@ -948,8 +944,8 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
main_link_aux_power_domain_get(dig_port, crtc_state);
}
-void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -957,33 +953,34 @@ void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- if (cpu_transcoder != TRANSCODER_EDP) {
- if (DISPLAY_VER(dev_priv) >= 13)
- val = TGL_TRANS_CLK_SEL_PORT(phy);
- else if (DISPLAY_VER(dev_priv) >= 12)
- val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
- else
- val = TRANS_CLK_SEL_PORT(encoder->port);
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return;
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
- }
+ if (DISPLAY_VER(dev_priv) >= 13)
+ val = TGL_TRANS_CLK_SEL_PORT(phy);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
+ else
+ val = TRANS_CLK_SEL_PORT(encoder->port);
+
+ intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
}
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
+void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val;
- if (cpu_transcoder != TRANSCODER_EDP) {
- if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_write(dev_priv,
- TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_DISABLED);
- else
- intel_de_write(dev_priv,
- TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
- }
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ val = TGL_TRANS_CLK_SEL_DISABLED;
+ else
+ val = TRANS_CLK_SEL_DISABLED;
+
+ intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
}
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
@@ -1009,9 +1006,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- iboost = intel_bios_encoder_hdmi_boost_level(encoder->devdata);
+ iboost = intel_bios_hdmi_boost_level(encoder->devdata);
else
- iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);
+ iboost = intel_bios_dp_boost_level(encoder->devdata);
if (iboost == 0) {
const struct intel_ddi_buf_trans *trans;
@@ -2200,15 +2197,13 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp;
- u32 val;
if (!crtc_state->fec_enable)
return;
intel_dp = enc_to_intel_dp(encoder);
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val |= DP_TP_CTL_FEC_ENABLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ 0, DP_TP_CTL_FEC_ENABLE);
}
static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -2216,15 +2211,13 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp;
- u32 val;
if (!crtc_state->fec_enable)
return;
intel_dp = enc_to_intel_dp(encoder);
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~DP_TP_CTL_FEC_ENABLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_FEC_ENABLE, 0);
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
}
@@ -2387,7 +2380,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
* 7.a Configure Transcoder Clock Select to direct the Port clock to the
* Transcoder.
*/
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
if (HAS_DP20(dev_priv))
intel_ddi_config_transcoder_dp2(encoder, crtc_state);
@@ -2514,7 +2507,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_fec(encoder, crtc_state);
if (!is_mst)
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
intel_dsc_dp_pps_write(encoder, crtc_state);
}
@@ -2526,6 +2519,10 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ if (HAS_DP20(dev_priv))
+ intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
+ crtc_state);
+
if (DISPLAY_VER(dev_priv) >= 12)
tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
@@ -2556,7 +2553,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
icl_program_mg_dp_mode(dig_port, crtc_state);
- intel_ddi_enable_pipe_clock(encoder, crtc_state);
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@@ -2622,12 +2619,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
wait = true;
}
- if (intel_crtc_has_dp_encoder(crtc_state)) {
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
- }
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_ENABLE, 0);
/* Disable FEC in DP Sink */
intel_ddi_disable_fec_state(encoder, crtc_state);
@@ -2660,19 +2654,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 12) {
if (is_mst) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- u32 val;
- val = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val &= ~(TGL_TRANS_DDI_PORT_MASK |
- TRANS_DDI_MODE_SELECT_MASK);
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder),
- val);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK,
+ 0);
}
} else {
if (!is_mst)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
}
intel_disable_ddi_buf(encoder, old_crtc_state);
@@ -2683,7 +2672,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* transcoder"
*/
if (DISPLAY_VER(dev_priv) >= 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
intel_pps_off(intel_dp);
@@ -2709,12 +2698,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
old_crtc_state, old_conn_state);
if (DISPLAY_VER(dev_priv) < 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_disable_ddi_buf(encoder, old_crtc_state);
if (DISPLAY_VER(dev_priv) >= 12)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_display_power_put(dev_priv,
dig_port->ddi_io_power_domain,
@@ -3153,8 +3142,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
wait = true;
}
- dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ dp_tp_ctl &= ~DP_TP_CTL_ENABLE;
intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
@@ -3222,12 +3210,9 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- u32 val;
- val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
- val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE);
/*
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
@@ -3559,6 +3544,37 @@ static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, crtc_state);
}
+static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll)
+{
+ return pll->info->id == DPLL_ID_ICL_TBTPLL;
+}
+
+static enum icl_port_dpll_id
+icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return ICL_PORT_DPLL_DEFAULT;
+
+ if (icl_ddi_tc_pll_is_tbt(pll))
+ return ICL_PORT_DPLL_DEFAULT;
+ else
+ return ICL_PORT_DPLL_MG_PHY;
+}
+
+enum icl_port_dpll_id
+intel_ddi_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!encoder->port_pll_type)
+ return ICL_PORT_DPLL_DEFAULT;
+
+ return encoder->port_pll_type(encoder, crtc_state);
+}
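A hypothetical caller, just to show the intended dispatch; icl_set_active_port_dpll() is the existing helper used elsewhere in this file:

	enum icl_port_dpll_id port_dpll_id =
		intel_ddi_port_pll_type(encoder, crtc_state);

	icl_set_active_port_dpll(crtc_state, port_dpll_id);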
+
static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct intel_shared_dpll *pll)
@@ -3571,7 +3587,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
if (drm_WARN_ON(&i915->drm, !pll))
return;
- if (pll->info->id == DPLL_ID_ICL_TBTPLL)
+ if (icl_ddi_tc_pll_is_tbt(pll))
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
else
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
@@ -3584,7 +3600,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
- if (crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
+ if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll))
crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port);
else
crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
@@ -3626,7 +3642,8 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(i915, encoder->port);
if (intel_phy_is_tc(i915, phy))
- intel_tc_port_sanitize_mode(enc_to_dig_port(encoder));
+ intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
+ crtc_state);
if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
intel_dp_sync_state(encoder, crtc_state);
@@ -4305,7 +4322,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_bios_encoder_supports_hdmi(devdata);
init_dp = intel_bios_encoder_supports_dp(devdata);
- if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ if (intel_bios_encoder_is_lspcon(devdata)) {
/*
* Lspcon device needs to be driven with DP connector
* with special detection sequence. So make sure DP
@@ -4420,6 +4437,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
+ encoder->port_pll_type = icl_ddi_tc_port_pll_type;
encoder->get_config = icl_ddi_combo_get_config;
} else {
encoder->enable_clock = icl_ddi_combo_enable_clock;
@@ -4432,6 +4450,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->enable_clock = icl_ddi_tc_enable_clock;
encoder->disable_clock = icl_ddi_tc_disable_clock;
encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
+ encoder->port_pll_type = icl_ddi_tc_port_pll_type;
encoder->get_config = icl_ddi_tc_get_config;
} else {
encoder->enable_clock = icl_ddi_combo_enable_clock;
@@ -4500,18 +4519,28 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_de_read(dev_priv, DDI_BUF_CTL(port))
& (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
- if (intel_bios_is_lane_reversal_needed(dev_priv, port))
+ if (intel_bios_encoder_lane_reversal(devdata))
dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
- dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ dig_port->aux_ch = intel_dp_aux_ch(encoder);
if (intel_phy_is_tc(dev_priv, phy)) {
bool is_legacy =
!intel_bios_encoder_supports_typec_usb(devdata) &&
!intel_bios_encoder_supports_tbt(devdata);
+ if (!is_legacy && init_hdmi) {
+ is_legacy = !init_dp;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n",
+ port_name(port),
+ str_yes_no(init_dp),
+ is_legacy ? "legacy" : "non-legacy");
+ }
+
intel_tc_port_init(dig_port, is_legacy);
encoder->update_prepare = intel_ddi_update_prepare;
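Summarizing the new heuristic for a TC port whose VBT does set the USB-C/TBT capability bits but also claims HDMI (assumed reading of the hunk above):

	/*
	 *   init_hdmi  init_dp  ->  is_legacy
	 *     yes        yes        no   (trust the VBT capability bits)
	 *     yes        no         yes  (HDMI-only port must be legacy)
	 */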
@@ -4521,35 +4550,21 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
drm_WARN_ON(&dev_priv->drm, port > PORT_I);
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
- if (init_dp) {
- if (!intel_ddi_init_dp_connector(dig_port))
- goto err;
-
- dig_port->hpd_pulse = intel_dp_hpd_pulse;
-
- if (dig_port->dp.mso_link_count)
- encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
- }
-
- /* In theory we don't need the encoder->type check, but leave it just in
- * case we have some really bad VBTs... */
- if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
- if (!intel_ddi_init_hdmi_connector(dig_port))
- goto err;
- }
-
if (DISPLAY_VER(dev_priv) >= 11) {
if (intel_phy_is_tc(dev_priv, phy))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (DISPLAY_VER(dev_priv) >= 8) {
- if (port == PORT_A || IS_GEMINILAKE(dev_priv) ||
- IS_BROXTON(dev_priv))
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ dig_port->connected = bdw_digital_port_connected;
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ dig_port->connected = lpt_digital_port_connected;
+ } else if (IS_BROADWELL(dev_priv)) {
+ if (port == PORT_A)
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else {
+ } else if (IS_HASWELL(dev_priv)) {
if (port == PORT_A)
dig_port->connected = hsw_digital_port_connected;
else
@@ -4558,6 +4573,25 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_infoframe_init(dig_port);
+ if (init_dp) {
+ if (!intel_ddi_init_dp_connector(dig_port))
+ goto err;
+
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
+
+ if (dig_port->dp.mso_link_count)
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ }
+
+ /*
+ * In theory we don't need the encoder->type check,
+ * but leave it just in case we have some really bad VBTs...
+ */
+ if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (!intel_ddi_init_hdmi_connector(dig_port))
+ goto err;
+ }
+
return;
err:
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index d39076facdce..c85e74ae68e4 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -40,6 +40,9 @@ void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_disable_clock(struct intel_encoder *encoder);
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder);
+enum icl_port_dpll_id
+intel_ddi_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
@@ -52,9 +55,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index d3994e2a7d63..5a386c7c0bc9 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -55,6 +55,7 @@
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
+#include "i9xx_wm.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
@@ -94,6 +95,7 @@
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
@@ -109,13 +111,14 @@
#include "intel_quirks.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
-#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
+#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -127,104 +130,9 @@
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
-static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
+static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- * @dev_priv: i915 device
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * surface width = hdisplay for normal plane and 64 for cursor
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-void intel_update_watermarks(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->display.funcs.wm->update_wm)
- dev_priv->display.funcs.wm->update_wm(dev_priv);
-}
-
-static int intel_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->compute_pipe_wm)
- return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);
- return 0;
-}
-
-static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
- return 0;
- if (drm_WARN_ON(&dev_priv->drm,
- !dev_priv->display.funcs.wm->compute_pipe_wm))
- return 0;
- return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
-}
-
-static bool intel_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->initial_watermarks) {
- dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
- return true;
- }
- return false;
-}
-
-static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->atomic_update_watermarks)
- dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
-}
-
-static void intel_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->optimize_watermarks)
- dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
-}
-
-static int intel_compute_global_watermarks(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->display.funcs.wm->compute_global_watermarks)
- return dev_priv->display.funcs.wm->compute_global_watermarks(state);
- return 0;
-}
-
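The doc comment being moved above spells out the self-refresh formula; a worked example under assumed numbers (1080p60 timings, 4 bytes per pixel, 35 us latency):

	/*
	 *   line time = htotal / dotclock = 2200 / 148500000 Hz ~= 14.8 us
	 *   watermark = (trunc(35 / 14.8) + 1) * 1920 * 4
	 *             = 3 * 1920 * 4 = 23040 bytes
	 */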
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
@@ -293,11 +201,11 @@ static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
}
/* Wa_2006604312:icl,ehl */
@@ -306,11 +214,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
if (enable)
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
else
- intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
- intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
}
/* Wa_1604331009:icl,jsl,ehl */
@@ -395,8 +301,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
/* Wait for the Pipe State to go off */
- if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
- PIPECONF_STATE_ENABLE, 100))
+ if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
+ TRANSCONF_STATE_ENABLE, 100))
drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -417,8 +323,8 @@ void assert_transcoder(struct drm_i915_private *dev_priv,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wakeref) {
- u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
- cur_state = !!(val & PIPECONF_ENABLE);
+ u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
+ cur_state = !!(val & TRANSCONF_ENABLE);
intel_display_power_put(dev_priv, power_domain, wakeref);
} else {
@@ -530,15 +436,15 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
- reg = PIPECONF(cpu_transcoder);
+ reg = TRANSCONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
- if (val & PIPECONF_ENABLE) {
+ if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
- intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
+ intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
intel_de_posting_read(dev_priv, reg);
/*
@@ -569,9 +475,9 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- reg = PIPECONF(cpu_transcoder);
+ reg = TRANSCONF(cpu_transcoder);
val = intel_de_read(dev_priv, reg);
- if ((val & PIPECONF_ENABLE) == 0)
+ if ((val & TRANSCONF_ENABLE) == 0)
return;
/*
@@ -579,11 +485,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
* so best keep it disabled when not needed.
*/
if (old_crtc_state->double_wide)
- val &= ~PIPECONF_DOUBLE_WIDE;
+ val &= ~TRANSCONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
if (!IS_I830(dev_priv))
- val &= ~PIPECONF_ENABLE;
+ val &= ~TRANSCONF_ENABLE;
if (DISPLAY_VER(dev_priv) >= 14)
intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
@@ -593,7 +499,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
intel_de_write(dev_priv, reg, val);
- if ((val & PIPECONF_ENABLE) == 0)
+ if ((val & TRANSCONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
@@ -1209,6 +1115,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_cursorclk_wa(old_crtc_state) &&
!needs_cursorclk_wa(new_crtc_state))
icl_wa_cursorclkgating(dev_priv, pipe, false);
+
+ if (intel_crtc_needs_color_update(new_crtc_state))
+ intel_color_post_update(new_crtc_state);
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -1252,7 +1161,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- u8 update_planes = new_crtc_state->update_planes;
+ u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
+ ~new_crtc_state->async_flip_planes;
const struct intel_plane_state *old_plane_state;
struct intel_plane *plane;
bool need_vbl_wait = false;
@@ -1261,7 +1171,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
if (plane->need_async_flip_disable_wa &&
plane->pipe == crtc->pipe &&
- update_planes & BIT(plane->id)) {
+ disable_async_flip_planes & BIT(plane->id)) {
/*
* Apart from the async flip bit we want to
* preserve the old state for the plane.
@@ -1378,7 +1288,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WA for platforms where async address update enable bit
* is double buffered and only latched at start of vblank.
*/
- if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
+ if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
intel_crtc_async_flip_disable_wa(state, crtc);
}
@@ -1801,12 +1711,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
enum transcoder transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
CHICKEN_TRANS(transcoder);
- u32 val;
- val = intel_de_read(dev_priv, reg);
- val &= ~HSW_FRAME_START_DELAY_MASK;
- val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg,
+ HSW_FRAME_START_DELAY_MASK,
+ HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
@@ -1846,7 +1754,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
intel_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
- intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
+ intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
crtc_state->pixel_multiplier - 1);
hsw_set_frame_start_delay(crtc_state);
@@ -1887,7 +1795,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(new_crtc_state);
+ bdw_set_pipe_misc(new_crtc_state);
if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
!transcoder_is_dsi(cpu_transcoder))
@@ -2233,6 +2141,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
+ intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);
+
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
@@ -2819,12 +2729,14 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- u32 crtc_vtotal, crtc_vblank_end;
+ u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
int vsyncshift = 0;
/* We need to be careful not to change the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
+ crtc_vdisplay = adjusted_mode->crtc_vdisplay;
crtc_vtotal = adjusted_mode->crtc_vtotal;
+ crtc_vblank_start = adjusted_mode->crtc_vblank_start;
crtc_vblank_end = adjusted_mode->crtc_vblank_end;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
@@ -2841,23 +2753,44 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
vsyncshift += adjusted_mode->crtc_htotal;
}
+ /*
+ * VBLANK_START no longer works on ADL+; instead we must use
+ * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
+ */
+ if (DISPLAY_VER(dev_priv) >= 13) {
+ intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
+ crtc_vblank_start - crtc_vdisplay);
+
+ /*
+ * VBLANK_START not used by hw, just clear it
+ * to make it stand out in register dumps.
+ */
+ crtc_vblank_start = 1;
+ }
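A quick worked example with assumed numbers:

	/*
	 * Example (assumed numbers): crtc_vdisplay = 1080 and
	 * crtc_vblank_start = 1084 -> SET_CONTEXT_LATENCY = 4 scanlines;
	 * the readout path below recovers 1080 + 4 = 1084 again.
	 */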
+
if (DISPLAY_VER(dev_priv) > 3)
- intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
- vsyncshift);
-
- intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
- intel_de_write(dev_priv, HBLANK(cpu_transcoder),
- (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
- intel_de_write(dev_priv, HSYNC(cpu_transcoder),
- (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
- intel_de_write(dev_priv, VBLANK(cpu_transcoder),
- (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
- intel_de_write(dev_priv, VSYNC(cpu_transcoder),
- (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
+ vsyncshift);
+
+ intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
+ HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
+ HTOTAL(adjusted_mode->crtc_htotal - 1));
+ intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
+ HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
+ HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
+ intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
+ HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
+ HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
+
+ intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
+ VACTIVE(crtc_vdisplay - 1) |
+ VTOTAL(crtc_vtotal - 1));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(crtc_vblank_start - 1) |
+ VBLANK_END(crtc_vblank_end - 1));
+ intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
+ VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
+ VSYNC_END(adjusted_mode->crtc_vsync_end - 1));
/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
@@ -2865,9 +2798,9 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
* bits. */
if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
- intel_de_write(dev_priv, VTOTAL(pipe),
- intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
-
+ intel_de_write(dev_priv, TRANS_VTOTAL(pipe),
+ VACTIVE(crtc_vdisplay - 1) |
+ VTOTAL(crtc_vtotal - 1));
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
@@ -2895,9 +2828,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
else
- return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+ return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
@@ -2906,43 +2839,47 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 tmp;
- tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder));
+ adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
+ adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hblank_start =
- (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_hblank_end =
- ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder));
+ adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
- tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder));
+ adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;
+
+ tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
+ adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
+ adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;
+ /* FIXME TGL+ DSI transcoders have this! */
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vblank_start =
- (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vblank_end =
- ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
+ adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
- pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
- pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+ tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
+ adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
+ adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;
if (intel_pipe_is_interlaced(pipe_config)) {
- pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
- pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
- pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
+ adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ adjusted_mode->crtc_vtotal += 1;
+ adjusted_mode->crtc_vblank_end += 1;
}
+
+ if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
+ adjusted_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vdisplay +
+ intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
}
static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
@@ -2982,7 +2919,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 pipeconf = 0;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = 0;
/*
* - We keep both pipes enabled on 830
@@ -2990,18 +2928,18 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
- pipeconf |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
if (crtc_state->double_wide)
- pipeconf |= PIPECONF_DOUBLE_WIDE;
+ val |= TRANSCONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (crtc_state->dither && crtc_state->pipe_bpp != 30)
- pipeconf |= PIPECONF_DITHER_EN |
- PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN |
+ TRANSCONF_DITHER_TYPE_SP;
switch (crtc_state->pipe_bpp) {
default:
@@ -3009,13 +2947,13 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
MISSING_CASE(crtc_state->pipe_bpp);
fallthrough;
case 18:
- pipeconf |= PIPECONF_BPC_6;
+ val |= TRANSCONF_BPC_6;
break;
case 24:
- pipeconf |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
break;
case 30:
- pipeconf |= PIPECONF_BPC_10;
+ val |= TRANSCONF_BPC_10;
break;
}
}
@@ -3023,23 +2961,23 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (DISPLAY_VER(dev_priv) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
- pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
else
- pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
+ val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
} else {
- pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
+ val |= TRANSCONF_INTERLACE_PROGRESSIVE;
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
crtc_state->limited_color_range)
- pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= TRANSCONF_COLOR_RANGE_SELECT;
- pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
- pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
- intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
@@ -3140,20 +3078,20 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
}
static enum intel_output_format
-bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
+bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
- if (tmp & PIPEMISC_YUV420_ENABLE) {
+ if (tmp & PIPE_MISC_YUV420_ENABLE) {
/* We support 4:2:0 in full blend mode only */
drm_WARN_ON(&dev_priv->drm,
- (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
+ (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
- } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
return INTEL_OUTPUT_FORMAT_YCBCR444;
} else {
return INTEL_OUTPUT_FORMAT_RGB;
@@ -3198,20 +3136,20 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = false;
- tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
- if (!(tmp & PIPECONF_ENABLE))
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
+ if (!(tmp & TRANSCONF_ENABLE))
goto out;
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
- switch (tmp & PIPECONF_BPC_MASK) {
- case PIPECONF_BPC_6:
+ switch (tmp & TRANSCONF_BPC_MASK) {
+ case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
break;
- case PIPECONF_BPC_8:
+ case TRANSCONF_BPC_8:
pipe_config->pipe_bpp = 24;
break;
- case PIPECONF_BPC_10:
+ case TRANSCONF_BPC_10:
pipe_config->pipe_bpp = 30;
break;
default:
@@ -3221,12 +3159,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- (tmp & PIPECONF_COLOR_RANGE_SELECT))
+ (tmp & TRANSCONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
- pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);
+ pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);
- pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+ pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
if (IS_CHERRYVIEW(dev_priv))
pipe_config->cgm_mode = intel_de_read(dev_priv,
@@ -3236,7 +3174,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
if (DISPLAY_VER(dev_priv) < 4)
- pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+ pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -3306,7 +3244,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
/*
@@ -3314,7 +3252,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (!intel_crtc_needs_modeset(crtc_state))
- val |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
switch (crtc_state->pipe_bpp) {
default:
@@ -3322,26 +3260,26 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
MISSING_CASE(crtc_state->pipe_bpp);
fallthrough;
case 18:
- val |= PIPECONF_BPC_6;
+ val |= TRANSCONF_BPC_6;
break;
case 24:
- val |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
break;
case 30:
- val |= PIPECONF_BPC_10;
+ val |= TRANSCONF_BPC_10;
break;
case 36:
- val |= PIPECONF_BPC_12;
+ val |= TRANSCONF_BPC_12;
break;
}
if (crtc_state->dither)
- val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACE_IF_ID_ILK;
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
else
- val |= PIPECONF_INTERLACE_PF_PD_ILK;
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
/*
* This would end up with an odd purple hue over
@@ -3352,18 +3290,18 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->limited_color_range &&
!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
- val |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= TRANSCONF_COLOR_RANGE_SELECT;
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
- val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
+ val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;
- val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
- val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
+ val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
- intel_de_write(dev_priv, PIPECONF(pipe), val);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
@@ -3378,25 +3316,25 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
* - During fastset the pipe is already enabled and must remain so
*/
if (!intel_crtc_needs_modeset(crtc_state))
- val |= PIPECONF_ENABLE;
+ val |= TRANSCONF_ENABLE;
if (IS_HASWELL(dev_priv) && crtc_state->dither)
- val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
+ val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACE_IF_ID_ILK;
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
else
- val |= PIPECONF_INTERLACE_PF_PD_ILK;
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
if (IS_HASWELL(dev_priv) &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
- val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
+ val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
- intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
- intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
-static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
+static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -3404,18 +3342,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
switch (crtc_state->pipe_bpp) {
case 18:
- val |= PIPEMISC_BPC_6;
+ val |= PIPE_MISC_BPC_6;
break;
case 24:
- val |= PIPEMISC_BPC_8;
+ val |= PIPE_MISC_BPC_8;
break;
case 30:
- val |= PIPEMISC_BPC_10;
+ val |= PIPE_MISC_BPC_10;
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
if (DISPLAY_VER(dev_priv) > 12)
- val |= PIPEMISC_BPC_12_ADLP;
+ val |= PIPE_MISC_BPC_12_ADLP;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
@@ -3423,38 +3361,38 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
}
if (crtc_state->dither)
- val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+ val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+ val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- val |= PIPEMISC_YUV420_ENABLE |
- PIPEMISC_YUV420_MODE_FULL_BLEND;
+ val |= PIPE_MISC_YUV420_ENABLE |
+ PIPE_MISC_YUV420_MODE_FULL_BLEND;
if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
- val |= PIPEMISC_HDR_MODE_PRECISION;
+ val |= PIPE_MISC_HDR_MODE_PRECISION;
if (DISPLAY_VER(dev_priv) >= 12)
- val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+ val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
- intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
+ intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
}
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
- switch (tmp & PIPEMISC_BPC_MASK) {
- case PIPEMISC_BPC_6:
+ switch (tmp & PIPE_MISC_BPC_MASK) {
+ case PIPE_MISC_BPC_6:
return 18;
- case PIPEMISC_BPC_8:
+ case PIPE_MISC_BPC_8:
return 24;
- case PIPEMISC_BPC_10:
+ case PIPE_MISC_BPC_10:
return 30;
/*
* PORT OUTPUT 12 BPC defined for ADLP+.
@@ -3466,7 +3404,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
* on older platforms, need to find a workaround for 12 BPC
* MIPI DSI HW readout.
*/
- case PIPEMISC_BPC_12_ADLP:
+ case PIPE_MISC_BPC_12_ADLP:
if (DISPLAY_VER(dev_priv) > 12)
return 36;
fallthrough;
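
Editor's note: the switch above maps the per-component BPC register field to pipe_bpp, which counts bits per pixel across three colour components; a trivial sketch of that relationship:

#include <stdio.h>

int main(void)
{
	/* 6->18, 8->24, 10->30, 12->36: three components per pixel */
	static const int bpc[] = { 6, 8, 10, 12 };
	unsigned int i;

	for (i = 0; i < sizeof(bpc) / sizeof(bpc[0]); i++)
		printf("%d bpc -> %d bpp\n", bpc[i], 3 * bpc[i]);
	return 0;
}
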
@@ -3618,33 +3556,33 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->shared_dpll = NULL;
ret = false;
- tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
- if (!(tmp & PIPECONF_ENABLE))
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
+ if (!(tmp & TRANSCONF_ENABLE))
goto out;
- switch (tmp & PIPECONF_BPC_MASK) {
- case PIPECONF_BPC_6:
+ switch (tmp & TRANSCONF_BPC_MASK) {
+ case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
break;
- case PIPECONF_BPC_8:
+ case TRANSCONF_BPC_8:
pipe_config->pipe_bpp = 24;
break;
- case PIPECONF_BPC_10:
+ case TRANSCONF_BPC_10:
pipe_config->pipe_bpp = 30;
break;
- case PIPECONF_BPC_12:
+ case TRANSCONF_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
- if (tmp & PIPECONF_COLOR_RANGE_SELECT)
+ if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
- switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
- case PIPECONF_OUTPUT_COLORSPACE_YUV601:
- case PIPECONF_OUTPUT_COLORSPACE_YUV709:
+ switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
+ case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
+ case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
break;
default:
@@ -3652,11 +3590,11 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
break;
}
- pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);
+ pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);
- pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+ pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
- pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);
+ pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);
pipe_config->csc_mode = intel_de_read(dev_priv,
PIPE_CSC_MODE(crtc->pipe));
@@ -3933,9 +3871,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
pipe_config->pch_pfit.force_thru = true;
}
- tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
- return tmp & PIPECONF_ENABLE;
+ return tmp & TRANSCONF_ENABLE;
}
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
@@ -4039,15 +3977,15 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (IS_HASWELL(dev_priv)) {
u32 tmp = intel_de_read(dev_priv,
- PIPECONF(pipe_config->cpu_transcoder));
+ TRANSCONF(pipe_config->cpu_transcoder));
- if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
+ if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
else
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
} else {
pipe_config->output_format =
- bdw_get_pipemisc_output_format(crtc);
+ bdw_get_pipe_misc_output_format(crtc);
}
pipe_config->gamma_mode = intel_de_read(dev_priv,
@@ -4090,7 +4028,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
intel_de_read(dev_priv,
- PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+ TRANS_MULT(pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -5145,6 +5083,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
* only fields that are known to not cause problems are preserved. */
saved_state->uapi = crtc_state->uapi;
+ saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
@@ -5439,6 +5378,20 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
}
}
+/* Returns the length up to and including the last differing byte */
+static size_t
+memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ if (a[i] != b[i])
+ return i + 1;
+ }
+
+ return 0;
+}
+
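
Editor's note: a hypothetical standalone check of the new helper's behaviour; the helper body is copied from the hunk above, main() is illustrative only.

#include <stddef.h>
#include <stdio.h>

typedef unsigned char u8;

/* body copied from the patch above */
static size_t memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (a[i] != b[i])
			return i + 1;
	}

	return 0;
}

int main(void)
{
	const u8 a[] = { 1, 2, 3, 4 };
	const u8 b[] = { 1, 9, 3, 4 };

	/* last difference at index 1, so only 2 bytes are worth dumping */
	printf("%zu\n", memcmp_diff_len(a, b, sizeof(a)));
	return 0;
}
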
static void
pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
bool fastset, const char *name,
@@ -5448,6 +5401,9 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
+
drm_dbg_kms(&dev_priv->drm,
"fastset mismatch in %s buffer\n", name);
print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
@@ -5455,6 +5411,9 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
16, 0, b, len, false);
} else {
+ /* only dump up to the last difference */
+ len = memcmp_diff_len(a, b, len);
+
drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
16, 0, a, len, false);
@@ -5943,73 +5902,13 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
return ret;
crtc_state->update_planes |= crtc_state->active_planes;
+ crtc_state->async_flip_planes = 0;
+ crtc_state->do_async_flip = false;
}
return 0;
}
-void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_display_mode adjusted_mode;
-
- drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
-
- if (crtc_state->vrr.enable) {
- adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
- adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
- adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
- crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
- }
-
- drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
-
- crtc->mode_flags = crtc_state->mode_flags;
-
- /*
- * The scanline counter increments at the leading edge of hsync.
- *
- * On most platforms it starts counting from vtotal-1 on the
- * first active line. That means the scanline counter value is
- * always one less than what we would expect. Ie. just after
- * start of vblank, which also occurs at start of hsync (on the
- * last active line), the scanline counter will read vblank_start-1.
- *
- * On gen2 the scanline counter starts counting from 1 instead
- * of vtotal-1, so we have to subtract one (or rather add vtotal-1
- * to keep the value positive), instead of adding one.
- *
- * On HSW+ the behaviour of the scanline counter depends on the output
- * type. For DP ports it behaves like most other platforms, but on HDMI
- * there's an extra 1 line difference. So we need to add two instead of
- * one to the value.
- *
- * On VLV/CHV DSI the scanline counter would appear to increment
- * approx. 1/3 of a scanline before start of vblank. Unfortunately
- * that means we can't tell whether we're in vblank or not while
- * we're on that particular line. We must still set scanline_offset
- * to 1 so that the vblank timestamps come out correct when we query
- * the scanline counter from within the vblank interrupt handler.
- * However if queried just before the start of vblank we'll get an
- * answer that's slightly in the future.
- */
- if (DISPLAY_VER(dev_priv) == 2) {
- int vtotal;
-
- vtotal = adjusted_mode.crtc_vtotal;
- if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- vtotal /= 2;
-
- crtc->scanline_offset = vtotal - 1;
- } else if (HAS_DDI(dev_priv) &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- crtc->scanline_offset = 2;
- } else {
- crtc->scanline_offset = 1;
- }
-}
-
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
@@ -6695,8 +6594,8 @@ static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
* @dev: drm device
* @_state: state to validate
*/
-static int intel_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *_state)
+int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *_state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
@@ -7014,7 +6913,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
intel_color_commit_arm(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(new_crtc_state);
+ bdw_set_pipe_misc(new_crtc_state);
if (intel_crtc_needs_fastset(new_crtc_state))
intel_pipe_fastset(old_crtc_state, new_crtc_state);
@@ -7090,6 +6989,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc);
+ drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
+
if (!modeset &&
intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_noarm(new_crtc_state);
@@ -7457,8 +7358,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
- if (state->modeset)
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ /*
+ * During full modesets we write a lot of registers, wait
+ * for PLLs, etc. Doing that while DC states are enabled
+ * is not a good idea.
+ *
+ * During fastsets and other updates we also need to
+ * disable DC states due to the following scenario:
+ * 1. DC5 exit and PSR exit happen
+ * 2. Some or all _noarm() registers are written
+ * 3. Due to some long delay PSR is re-entered
+ * 4. DC5 entry -> DMC saves the already written new
+ * _noarm() registers and the old not yet written
+ * _arm() registers
+ * 5. DC5 exit -> DMC restores a mixture of old and
+ * new register values and arms the update
+ * 6. PSR exit -> hardware latches a mixture of old and
+ * new register values -> corrupted frame, or worse
+ * 7. New _arm() registers are finally written
+ * 8. Hardware finally latches a complete set of new
+ * register values, and subsequent frames will be OK again
+ */
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
intel_atomic_prepare_plane_clear_colors(state);
@@ -7607,8 +7528,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
- intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*
@@ -8356,124 +8277,6 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
}
-static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
-{
- struct drm_plane *plane;
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(state->dev, crtc) {
- struct intel_crtc_state *crtc_state;
-
- crtc_state = intel_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (crtc_state->hw.active) {
- /*
- * Preserve the inherited flag to avoid
- * taking the full modeset path.
- */
- crtc_state->inherited = true;
- }
- }
-
- drm_for_each_plane(plane, state->dev) {
- struct drm_plane_state *plane_state;
-
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
- }
-
- return 0;
-}
-
-/*
- * Calculate what we think the watermarks should be for the state we've read
- * out of the hardware and then immediately program those watermarks so that
- * we ensure the hardware settings match our internal state.
- *
- * We can calculate what we think WM's should be by creating a duplicate of the
- * current state (which was constructed during hardware readout) and running it
- * through the atomic check code to calculate new watermark values in the
- * state object.
- */
-static void sanitize_watermarks(struct drm_i915_private *dev_priv)
-{
- struct drm_atomic_state *state;
- struct intel_atomic_state *intel_state;
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- struct drm_modeset_acquire_ctx ctx;
- int ret;
- int i;
-
- /* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.funcs.wm->optimize_watermarks)
- return;
-
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
- return;
-
- intel_state = to_intel_atomic_state(state);
-
- drm_modeset_acquire_init(&ctx, 0);
-
-retry:
- state->acquire_ctx = &ctx;
-
- /*
- * Hardware readout is the only time we don't want to calculate
- * intermediate watermarks (since we don't trust the current
- * watermarks).
- */
- if (!HAS_GMCH(dev_priv))
- intel_state->skip_intermediate_wm = true;
-
- ret = sanitize_watermarks_add_affected(state);
- if (ret)
- goto fail;
-
- ret = intel_atomic_check(&dev_priv->drm, state);
- if (ret)
- goto fail;
-
- /* Write calculated watermark values back */
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
- crtc_state->wm.need_postvbl_update = true;
- intel_optimize_watermarks(intel_state, crtc);
-
- to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
- }
-
-fail:
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- goto retry;
- }
-
- /*
- * If we fail here, it means that the hardware appears to be
- * programmed in a way that shouldn't be possible, given our
- * understanding of watermark requirements. This might mean a
- * mistake in the hardware readout code or a mistake in the
- * watermark calculations for a given platform. Raise a WARN
- * so that this is noticeable.
- *
- * If this actually happens, we'll have to just leave the
- * BIOS-programmed watermarks untouched and hope for the best.
- */
- drm_WARN(&dev_priv->drm, ret,
- "Could not determine valid watermarks for inherited state\n");
-
- drm_atomic_state_put(state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-}
-
static int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
@@ -8634,12 +8437,16 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
goto cleanup_bios;
/* FIXME: completely on the wrong abstraction layer */
+ ret = intel_power_domains_init(i915);
+ if (ret < 0)
+ goto cleanup_vga;
+
intel_power_domains_init_hw(i915, false);
if (!HAS_DISPLAY(i915))
return 0;
- intel_dmc_ucode_init(i915);
+ intel_dmc_init(i915);
i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
@@ -8674,8 +8481,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
return 0;
cleanup_vga_client_pw_domain_dmc:
- intel_dmc_ucode_fini(i915);
+ intel_dmc_fini(i915);
intel_power_domains_driver_remove(i915);
+cleanup_vga:
intel_vga_unregister(i915);
cleanup_bios:
intel_bios_driver_remove(i915);
@@ -8694,7 +8502,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return 0;
- intel_init_pm(i915);
+ intel_wm_init(i915);
intel_panel_sanitize_ssc(i915);
@@ -8750,7 +8558,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(i915))
- sanitize_watermarks(i915);
+ ilk_wm_sanitize(i915);
return 0;
}
@@ -8791,6 +8599,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
/* 640x480@60Hz, ~25175 kHz */
struct dpll clock = {
.m1 = 18,
@@ -8817,13 +8626,20 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
- intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
- intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
- intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
- intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
- intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
- intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
- intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+ intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
+ HACTIVE(640 - 1) | HTOTAL(800 - 1));
+ intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
+ HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
+ intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
+ HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
+ intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
+ VACTIVE(480 - 1) | VTOTAL(525 - 1));
+ intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
+ VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
+ intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
+ VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
+ intel_de_write(dev_priv, PIPESRC(pipe),
+ PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));
intel_de_write(dev_priv, FP0(pipe), fp);
intel_de_write(dev_priv, FP1(pipe), fp);
@@ -8854,8 +8670,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
udelay(150); /* wait for warmup */
}
- intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE);
+ intel_de_posting_read(dev_priv, TRANSCONF(pipe));
intel_wait_for_pipe_scanline_moving(crtc);
}
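
Editor's note: a quick sketch verifying that the new field macros pack the same 640x480@60 values as the old open-coded writes; the HACTIVE()/HTOTAL() layouts below are assumed to mirror the old low/high halfword packing and are not taken from i915_reg.h.

#include <stdint.h>
#include <stdio.h>

#define HACTIVE(x) ((uint32_t)(x) << 0)	/* assumed field layout */
#define HTOTAL(x)  ((uint32_t)(x) << 16)	/* assumed field layout */

int main(void)
{
	uint32_t old_val = (640 - 1) | ((800 - 1) << 16);
	uint32_t new_val = HACTIVE(640 - 1) | HTOTAL(800 - 1);

	printf("old=%08x new=%08x %s\n", old_val, new_val,
	       old_val == new_val ? "match" : "MISMATCH");
	return 0;
}
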
@@ -8878,8 +8694,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_WARN_ON(&dev_priv->drm,
intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);
- intel_de_write(dev_priv, PIPECONF(pipe), 0);
- intel_de_posting_read(dev_priv, PIPECONF(pipe));
+ intel_de_write(dev_priv, TRANSCONF(pipe), 0);
+ intel_de_posting_read(dev_priv, TRANSCONF(pipe));
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -9000,7 +8816,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
- intel_dmc_ucode_fini(i915);
+ intel_dmc_fini(i915);
intel_power_domains_driver_remove(i915);
@@ -9035,14 +8851,14 @@ void intel_display_driver_register(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- intel_display_debugfs_register(i915);
-
/* Must be done after probing outputs */
intel_opregion_register(i915);
intel_acpi_video_register(i915);
intel_audio_init(i915);
+ intel_display_debugfs_register(i915);
+
/*
* Some ports require correctly set-up hpd registers for
* detection to work properly (leading to ghost connected
@@ -9051,7 +8867,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
* enabled. We do it last so that the async config cannot run
* before the connectors are registered.
*/
- intel_fbdev_initial_config_async(&i915->drm);
+ intel_fbdev_initial_config_async(i915);
/*
* We need to coordinate the hotplugs with the asynchronous
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index cb6f520cc575..596fd3ec1983 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -32,6 +32,7 @@
enum drm_scaling_filter;
struct dpll;
+struct drm_atomic_state;
struct drm_connector;
struct drm_device;
struct drm_display_mode;
@@ -171,6 +172,8 @@ enum tc_port_mode {
};
enum aux_ch {
+ AUX_CH_NONE = -1,
+
AUX_CH_A,
AUX_CH_B,
AUX_CH_C,
@@ -394,6 +397,7 @@ enum phy_fia {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
@@ -418,7 +422,6 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state);
bool intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset);
-void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
void intel_plane_destroy(struct drm_plane *plane);
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
@@ -507,7 +510,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index fb8670aa2932..0b5509f268a7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -19,13 +19,12 @@
#include "intel_cdclk.h"
#include "intel_display_limits.h"
#include "intel_display_power.h"
-#include "intel_dmc.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_i915_private;
struct drm_property;
@@ -40,6 +39,7 @@ struct intel_cdclk_vals;
struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dmc;
struct intel_dpll_funcs;
struct intel_dpll_mgr;
struct intel_fbdev;
@@ -85,6 +85,7 @@ struct intel_wm_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
+ void (*get_hw_state)(struct drm_i915_private *i915);
};
struct intel_audio_state {
@@ -102,7 +103,7 @@ struct intel_audio {
u32 freq_cntrl;
/* current audio state for the audio component hooks */
- struct intel_audio_state state[I915_MAX_PIPES];
+ struct intel_audio_state state[I915_MAX_TRANSCODERS];
/* necessary resource sharing with HDMI LPE audio driver. */
struct {
@@ -182,6 +183,17 @@ struct intel_hotplug {
* blocked behind the non-DP one.
*/
struct workqueue_struct *dp_wq;
+
+ /*
+ * Flag to track if long HPDs need not be processed
+ *
+ * Some panels generate long HPDs while remaining connected to the port.
+ * This can cause issues with CI test results. In CI systems we
+ * don't expect to disconnect the panels and can ignore the long
+ * HPDs generated by the faulty panels. This flag can be used as a
+ * cue to ignore the long HPDs and can be set / unset using debugfs.
+ */
+ bool ignore_long_hpd;
};
struct intel_vbt_data {
@@ -243,7 +255,7 @@ struct intel_wm {
struct g4x_wm_values g4x;
};
- u8 max_level;
+ u8 num_levels;
/*
* Should be held around atomic WM register writing; also
@@ -340,6 +352,11 @@ struct intel_display {
} dkl;
struct {
+ struct intel_dmc *dmc;
+ intel_wakeref_t wakeref;
+ } dmc;
+
+ struct {
/* VLV/CHV/BXT/GLK DSI MMIO register base address */
u32 mmio_base;
} dsi;
@@ -378,9 +395,15 @@ struct intel_display {
} gmbus;
struct {
- struct i915_hdcp_comp_master *master;
+ struct i915_hdcp_master *master;
bool comp_added;
+ /*
+ * HDCP message struct for allocation of memory which can be
+ * reused when sending messages to the GSC CS.
+ * This is only populated post-Meteorlake.
+ */
+ struct intel_hdcp_gsc_message *hdcp_message;
/* Mutex to protect the above hdcp component related values. */
struct mutex comp_mutex;
} hdcp;
@@ -466,7 +489,6 @@ struct intel_display {
/* Grouping using named structs. Keep sorted. */
struct intel_audio audio;
- struct intel_dmc dmc;
struct intel_dpll dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 7bcd90384a46..cc5026272558 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -8,6 +8,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
+#include "hsw_ips.h"
#include "i915_debugfs.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -26,10 +27,9 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_panel.h"
-#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
-#include "skl_watermark.h"
+#include "intel_wm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -49,33 +49,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ips_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- if (!HAS_IPS(dev_priv))
- return -ENODEV;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- seq_printf(m, "Enabled by kernel parameter: %s\n",
- str_yes_no(dev_priv->params.enable_ips));
-
- if (DISPLAY_VER(dev_priv) >= 8) {
- seq_puts(m, "Currently: unknown\n");
- } else {
- if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
- seq_puts(m, "Currently: enabled\n");
- else
- seq_puts(m, "Currently: disabled\n");
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -169,269 +142,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_psr_sink_status_show(struct seq_file *m, void *data)
-{
- u8 val;
- static const char * const sink_status[] = {
- "inactive",
- "transition to active, capture and display",
- "active, display from RFB",
- "active, capture and display on sink device timings",
- "transition to inactive, capture and display, timing re-sync",
- "reserved",
- "reserved",
- "sink internal error",
- };
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- intel_attached_dp(to_intel_connector(connector));
- int ret;
-
- if (!CAN_PSR(intel_dp)) {
- seq_puts(m, "PSR Unsupported\n");
- return -ENODEV;
- }
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
-
- if (ret == 1) {
- const char *str = "unknown";
-
- val &= DP_PSR_SINK_STATE_MASK;
- if (val < ARRAY_SIZE(sink_status))
- str = sink_status[val];
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
- } else {
- return ret;
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
-
-static void
-psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- const char *status = "unknown";
- u32 val, status_val;
-
- if (intel_dp->psr.psr2_enabled) {
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON"
- };
- val = intel_de_read(dev_priv,
- EDP_PSR2_STATUS(intel_dp->psr.transcoder));
- status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- } else {
- static const char * const live_status[] = {
- "IDLE",
- "SRDONACK",
- "SRDENT",
- "BUFOFF",
- "BUFON",
- "AUXACK",
- "SRDOFFACK",
- "SRDENT_ON",
- };
- val = intel_de_read(dev_priv,
- EDP_PSR_STATUS(intel_dp->psr.transcoder));
- status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
- EDP_PSR_STATUS_STATE_SHIFT;
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- }
-
- seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
-}
-
-static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_psr *psr = &intel_dp->psr;
- intel_wakeref_t wakeref;
- const char *status;
- bool enabled;
- u32 val;
-
- seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
- if (psr->sink_support)
- seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
- seq_puts(m, "\n");
-
- if (!psr->sink_support)
- return 0;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&psr->lock);
-
- if (psr->enabled)
- status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
- else
- status = "disabled";
- seq_printf(m, "PSR mode: %s\n", status);
-
- if (!psr->enabled) {
- seq_printf(m, "PSR sink not reliable: %s\n",
- str_yes_no(psr->sink_not_reliable));
-
- goto unlock;
- }
-
- if (psr->psr2_enabled) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(intel_dp->psr.transcoder));
- enabled = val & EDP_PSR2_ENABLE;
- } else {
- val = intel_de_read(dev_priv,
- EDP_PSR_CTL(intel_dp->psr.transcoder));
- enabled = val & EDP_PSR_ENABLE;
- }
- seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
- str_enabled_disabled(enabled), val);
- psr_source_status(intel_dp, m);
- seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
- psr->busy_frontbuffer_bits);
-
- /*
- * SKL+ Perf counter is reset to 0 everytime DC state is entered
- */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = intel_de_read(dev_priv,
- EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
- val &= EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance counter: %u\n", val);
- }
-
- if (psr->debug & I915_PSR_DEBUG_IRQ) {
- seq_printf(m, "Last attempted entry at: %lld\n",
- psr->last_entry_attempt);
- seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
- }
-
- if (psr->psr2_enabled) {
- u32 su_frames_val[3];
- int frame;
-
- /*
- * Reading all 3 registers before hand to minimize crossing a
- * frame boundary between register reads
- */
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(dev_priv,
- PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
- su_frames_val[frame / 3] = val;
- }
-
- seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
-
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
- u32 su_blocks;
-
- su_blocks = su_frames_val[frame / 3] &
- PSR2_SU_STATUS_MASK(frame);
- su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
- seq_printf(m, "%d\t%d\n", frame, su_blocks);
- }
-
- seq_printf(m, "PSR2 selective fetch: %s\n",
- str_enabled_disabled(psr->psr2_sel_fetch_enabled));
- }
-
-unlock:
- mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_edp_psr_status(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_dp *intel_dp = NULL;
- struct intel_encoder *encoder;
-
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
- /* Find the first EDP which supports PSR */
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- intel_dp = enc_to_intel_dp(encoder);
- break;
- }
-
- if (!intel_dp)
- return -ENODEV;
-
- return intel_psr_status(m, intel_dp);
-}
-
-static int
-i915_edp_psr_debug_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_encoder *encoder;
- intel_wakeref_t wakeref;
- int ret = -ENODEV;
-
- if (!HAS_PSR(dev_priv))
- return ret;
-
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- // TODO: split to each transcoder's PSR debug state
- ret = intel_psr_debug_set(intel_dp, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- }
-
- return ret;
-}
-
-static int
-i915_edp_psr_debug_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
- struct intel_encoder *encoder;
-
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- // TODO: split to each transcoder's PSR debug state
- *val = READ_ONCE(intel_dp->psr.debug);
- return 0;
- }
-
- return -ENODEV;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
- i915_edp_psr_debug_get, i915_edp_psr_debug_set,
- "%llu\n");
-
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -832,10 +542,10 @@ static const struct file_operations crtc_updates_fops = {
.write = crtc_updates_write
};
-static void crtc_updates_add(struct drm_crtc *crtc)
+static void crtc_updates_add(struct intel_crtc *crtc)
{
- debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
- to_intel_crtc(crtc), &crtc_updates_fops);
+ debugfs_create_file("i915_update_info", 0644, crtc->base.debugfs_entry,
+ crtc, &crtc_updates_fops);
}
#else
@@ -845,7 +555,7 @@ static void crtc_updates_info(struct seq_file *m,
{
}
-static void crtc_updates_add(struct drm_crtc *crtc)
+static void crtc_updates_add(struct intel_crtc *crtc)
{
}
#endif
@@ -1282,237 +992,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-static void wm_latency_show(struct seq_file *m, const u16 wm[8])
-{
- struct drm_i915_private *dev_priv = m->private;
- int level;
- int num_levels;
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- drm_modeset_lock_all(&dev_priv->drm);
-
- for (level = 0; level < num_levels; level++) {
- unsigned int latency = wm[level];
-
- /*
- * - WM1+ latency values in 0.5us units
- * - latencies are in us on gen9/vlv/chv
- */
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- seq_printf(m, "WM%d %u (%u.%u usec)\n",
- level, wm[level], latency / 10, latency % 10);
- }
-
- drm_modeset_unlock_all(&dev_priv->drm);
-}
-
-static int pri_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.pri_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int spr_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.spr_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int cur_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.cur_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int pri_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- return -ENODEV;
-
- return single_open(file, pri_wm_latency_show, dev_priv);
-}
-
-static int spr_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, spr_wm_latency_show, dev_priv);
-}
-
-static int cur_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, cur_wm_latency_show, dev_priv);
-}
-
-static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, u16 wm[8])
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 new[8] = { 0 };
- int num_levels;
- int level;
- int ret;
- char tmp[32];
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
- &new[0], &new[1], &new[2], &new[3],
- &new[4], &new[5], &new[6], &new[7]);
- if (ret != num_levels)
- return -EINVAL;
-
- drm_modeset_lock_all(&dev_priv->drm);
-
- for (level = 0; level < num_levels; level++)
- wm[level] = new[level];
-
- drm_modeset_unlock_all(&dev_priv->drm);
-
- return len;
-}
-
-
-static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.pri_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.spr_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
- else
- latencies = dev_priv->display.wm.cur_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static const struct file_operations i915_pri_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = pri_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = pri_wm_latency_write
-};
-
-static const struct file_operations i915_spr_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = spr_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = spr_wm_latency_write
-};
-
-static const struct file_operations i915_cur_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = cur_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cur_wm_latency_write
-};
-
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
const char __user *ubuf,
@@ -1574,12 +1053,10 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
- {"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
{"i915_opregion", i915_opregion, 0},
{"i915_vbt", i915_vbt, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
- {"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
@@ -1593,13 +1070,9 @@ static const struct {
const struct file_operations *fops;
} intel_display_debugfs_files[] = {
{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
- {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
- {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
- {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
- {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
void intel_display_debugfs_register(struct drm_i915_private *i915)
@@ -1619,10 +1092,12 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
+ hsw_ips_debugfs_register(i915);
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
intel_hpd_debugfs_register(i915);
- skl_watermark_ipc_debugfs_register(i915);
+ intel_psr_debugfs_register(i915);
+ intel_wm_debugfs_register(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
@@ -1674,16 +1149,6 @@ out:
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
-static int i915_psr_status_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- intel_attached_dp(to_intel_connector(connector));
-
- return intel_psr_status(m, intel_dp);
-}
-DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
-
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
@@ -1901,7 +1366,7 @@ static const struct file_operations i915_dsc_bpc_fops = {
*/
static int i915_current_bpc_show(struct seq_file *m, void *data)
{
- struct intel_crtc *crtc = to_intel_crtc(m->private);
+ struct intel_crtc *crtc = m->private;
struct intel_crtc_state *crtc_state;
int ret;
@@ -1918,6 +1383,17 @@ static int i915_current_bpc_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_current_bpc);
+/* Pipe may differ from crtc index if pipes are fused off */
+static int intel_crtc_pipe_show(struct seq_file *m, void *unused)
+{
+ struct intel_crtc *crtc = m->private;
+
+ seq_printf(m, "%c\n", pipe_name(crtc->pipe));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe);
+
/**
* intel_connector_debugfs_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -1936,19 +1412,11 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
return;
intel_drrs_connector_debugfs_add(intel_connector);
+ intel_psr_connector_debugfs_add(intel_connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
connector, &i915_panel_fops);
- debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
- connector, &i915_psr_sink_status_fops);
- }
-
- if (HAS_PSR(dev_priv) &&
- connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- debugfs_create_file("i915_psr_status", 0444, root,
- connector, &i915_psr_status_fops);
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
@@ -1983,15 +1451,19 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
*
* Failure to add debugfs entries should generally be ignored.
*/
-void intel_crtc_debugfs_add(struct drm_crtc *crtc)
+void intel_crtc_debugfs_add(struct intel_crtc *crtc)
{
- if (!crtc->debugfs_entry)
+ struct dentry *root = crtc->base.debugfs_entry;
+
+ if (!root)
return;
crtc_updates_add(crtc);
- intel_drrs_crtc_debugfs_add(to_intel_crtc(crtc));
- intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc));
+ intel_drrs_crtc_debugfs_add(crtc);
+ intel_fbc_crtc_debugfs_add(crtc);
- debugfs_create_file("i915_current_bpc", 0444, crtc->debugfs_entry, crtc,
+ debugfs_create_file("i915_current_bpc", 0444, root, crtc,
&i915_current_bpc_fops);
+ debugfs_create_file("i915_pipe", 0444, root, crtc,
+ &intel_crtc_pipe_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index d3a79c07c384..e1f479b7acd1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -6,18 +6,18 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_H__
#define __INTEL_DISPLAY_DEBUGFS_H__
-struct drm_crtc;
struct drm_i915_private;
struct intel_connector;
+struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
void intel_display_debugfs_register(struct drm_i915_private *i915);
void intel_connector_debugfs_add(struct intel_connector *connector);
-void intel_crtc_debugfs_add(struct drm_crtc *crtc);
+void intel_crtc_debugfs_add(struct intel_crtc *crtc);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline void intel_connector_debugfs_add(struct intel_connector *connector) {}
-static inline void intel_crtc_debugfs_add(struct drm_crtc *crtc) {}
+static inline void intel_crtc_debugfs_add(struct intel_crtc *crtc) {}
#endif
#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 1a23ecd4623a..f86060195987 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -264,9 +264,10 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
}
static u32
-sanitize_target_dc_state(struct drm_i915_private *dev_priv,
+sanitize_target_dc_state(struct drm_i915_private *i915,
u32 target_dc_state)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
static const u32 states[] = {
DC_STATE_EN_UPTO_DC6,
DC_STATE_EN_UPTO_DC5,
@@ -279,7 +280,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
if (target_dc_state != states[i])
continue;
- if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
+ if (power_domains->allowed_dc_mask & target_dc_state)
break;
target_dc_state = states[i + 1];
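
Editor's note: a minimal model of the fallback walk in sanitize_target_dc_state() — if the requested DC state is not in the allowed mask, step down to the next weaker state in the table. The constants below are stand-ins, not the real DC_STATE_* values.

#include <stdint.h>
#include <stdio.h>

#define DC_STATE_EN_UPTO_DC6 0x2u	/* stand-in value */
#define DC_STATE_EN_UPTO_DC5 0x1u	/* stand-in value */
#define DC_STATE_DISABLE     0x0u

static uint32_t sanitize(uint32_t allowed_dc_mask, uint32_t target)
{
	static const uint32_t states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_DISABLE,
	};
	unsigned int i;

	/* same shape as the loop above: match, then fall back if disallowed */
	for (i = 0; i + 1 < sizeof(states) / sizeof(states[0]); i++) {
		if (target != states[i])
			continue;
		if (allowed_dc_mask & target)
			break;
		target = states[i + 1];
	}

	return target;
}

int main(void)
{
	/* DC6 requested but only DC5 allowed -> falls back to DC5 */
	printf("%#x\n", sanitize(DC_STATE_EN_UPTO_DC5, DC_STATE_EN_UPTO_DC6));
	return 0;
}
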
@@ -312,7 +313,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
state = sanitize_target_dc_state(dev_priv, state);
- if (state == dev_priv->display.dmc.target_dc_state)
+ if (state == power_domains->target_dc_state)
goto unlock;
dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
@@ -323,7 +324,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
if (!dc_off_enabled)
intel_power_well_enable(dev_priv, power_well);
- dev_priv->display.dmc.target_dc_state = state;
+ power_domains->target_dc_state = state;
if (!dc_off_enabled)
intel_power_well_disable(dev_priv, power_well);
@@ -992,10 +993,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
dev_priv->params.disable_power_well);
- dev_priv->display.dmc.allowed_dc_mask =
+ power_domains->allowed_dc_mask =
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
- dev_priv->display.dmc.target_dc_state =
+ power_domains->target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
@@ -1260,9 +1261,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val |= LCPLL_POWER_DOWN_ALLOW;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
intel_de_posting_read(dev_priv, LCPLL_CTL);
}
}
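/*
 * A minimal sketch of the open-coded pattern that the intel_de_rmw()
 * conversions throughout this patch replace (illustrative only):
 *
 *	u32 val = intel_de_read(i915, reg);
 *	val &= ~clear;
 *	val |= set;
 *	intel_de_write(i915, reg, val);
 *
 * Hence intel_de_rmw(i915, reg, 0, bits) sets bits,
 * intel_de_rmw(i915, reg, bits, 0) clears them, and a clear mask of
 * ~bit (as in vlv_init_display_clock_gating() further down) keeps only
 * that bit while zeroing the rest of the register.
 */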
@@ -1306,9 +1305,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
- val = intel_de_read(dev_priv, LCPLL_CTL);
- val &= ~LCPLL_CD_SOURCE_FCLK;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
@@ -1347,15 +1344,11 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
- }
+ if (HAS_PCH_LPT_LP(dev_priv))
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ PCH_LP_PARTITION_LEVEL_DISABLE, 0);
lpt_disable_clkout_dp(dev_priv);
hsw_disable_lcpll(dev_priv, true, true);
@@ -1363,25 +1356,21 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
intel_init_pch_refclk(dev_priv);
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
- val |= PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
- }
+ if (HAS_PCH_LPT_LP(dev_priv))
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ 0, PCH_LP_PARTITION_LEVEL_DISABLE);
}
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
bool enable)
{
i915_reg_t reg;
- u32 reset_bits, val;
+ u32 reset_bits;
if (IS_IVYBRIDGE(dev_priv)) {
reg = GEN7_MSG_CTL;
@@ -1394,14 +1383,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
if (DISPLAY_VER(dev_priv) >= 14)
reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
- val = intel_de_read(dev_priv, reg);
-
- if (enable)
- val |= reset_bits;
- else
- val &= ~reset_bits;
-
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
@@ -1580,10 +1562,8 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
return;
if (IS_ALDERLAKE_S(dev_priv) ||
- IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
- IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
- /* Wa_1409767108:tgl,dg1,adl-s */
+ IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ /* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
table = tgl_buddy_page_masks;
@@ -1618,7 +1598,6 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -1646,6 +1625,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
+ if (DISPLAY_VER(dev_priv) == 14)
+ intel_de_rmw(dev_priv, DC_STATE_EN,
+ HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
+
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(dev_priv);
@@ -1670,11 +1653,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_dmc_load_program(dev_priv);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
- if (DISPLAY_VER(dev_priv) >= 12) {
- val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
- DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
- intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, val);
- }
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
+ DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
+ DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
if (DISPLAY_VER(dev_priv) >= 13)
@@ -1700,6 +1682,10 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(dev_priv);
+ if (DISPLAY_VER(dev_priv) == 14)
+ intel_de_rmw(dev_priv, DC_STATE_EN, 0,
+ HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
+
/*
* 4. Disable Power Well 1 (PG1).
* The AUX IO power wells are toggled on demand, so they are already
@@ -2055,7 +2041,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
intel_dmc_has_payload(i915)) {
intel_display_power_flush_work(i915);
@@ -2244,22 +2230,22 @@ void intel_display_power_suspend(struct drm_i915_private *i915)
void intel_display_power_resume(struct drm_i915_private *i915)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+
if (DISPLAY_VER(i915) >= 11) {
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
if (intel_dmc_has_payload(i915)) {
- if (i915->display.dmc.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC6)
+ if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(i915);
- else if (i915->display.dmc.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC5)
+ else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(i915);
}
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
bxt_disable_dc9(i915);
bxt_display_core_init(i915, true);
if (intel_dmc_has_payload(i915) &&
- (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 2154d900b1aa..8e96be8e6330 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -137,6 +137,10 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
+ u32 dc_state;
+ u32 target_dc_state;
+ u32 allowed_dc_mask;
+
intel_wakeref_t init_wakeref;
intel_wakeref_t disable_wakeref;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 8710dd41ffd4..1676df1dc066 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -333,7 +333,6 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- u32 val;
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
@@ -356,9 +355,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
}
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
@@ -380,17 +377,27 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- u32 val;
hsw_power_well_pre_disable(dev_priv,
power_well->desc->irq_pipe_mask);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
+static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_EDP &&
+ encoder->port == port)
+ return true;
+ }
+
+ return false;
+}
+
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
@@ -398,29 +405,22 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- if (DISPLAY_VER(dev_priv) < 12) {
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val | ICL_LANE_ENABLE_AUX);
- }
+ if (DISPLAY_VER(dev_priv) < 12)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+ 0, ICL_LANE_ENABLE_AUX);
hsw_wait_for_power_well_enable(dev_priv, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
- val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
- val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
- }
+ !intel_port_is_edp(dev_priv, (enum port)phy))
+ intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx),
+ 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS);
}
static void
@@ -430,17 +430,12 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val & ~ICL_LANE_ENABLE_AUX);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -502,19 +497,15 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
- u32 val;
icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
- val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
- val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (is_tbt)
- val |= DP_AUX_CH_CTL_TBT_IO;
- intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
+ intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch),
+ DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
+ intel_de_rmw(dev_priv, regs->driver,
+ 0,
+ HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
/*
* An AUX timeout is expected if the TBT DP tunnel is down,
@@ -700,19 +691,20 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
return mask;
}
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+void gen9_sanitize_dc_state(struct drm_i915_private *i915)
{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
u32 val;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915))
return;
- val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
+ val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Resetting DC state tracking from %02x to %02x\n",
- dev_priv->display.dmc.dc_state, val);
- dev_priv->display.dmc.dc_state = val;
+ power_domains->dc_state, val);
+ power_domains->dc_state = val;
}
/**
@@ -740,6 +732,7 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
*/
void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u32 val;
u32 mask;
@@ -747,8 +740,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->display.dmc.allowed_dc_mask))
- state &= dev_priv->display.dmc.allowed_dc_mask;
+ state & ~power_domains->allowed_dc_mask))
+ state &= power_domains->allowed_dc_mask;
val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
@@ -756,16 +749,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
val & mask, state);
/* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->display.dmc.dc_state)
+ if ((val & mask) != power_domains->dc_state)
drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->display.dmc.dc_state, val & mask);
+ power_domains->dc_state, val & mask);
val &= ~mask;
val |= state;
gen9_write_dc_state(dev_priv, val);
- dev_priv->display.dmc.dc_state = val & mask;
+ power_domains->dc_state = val & mask;
}
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -776,12 +769,8 @@ static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
- u32 val;
-
drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
- val = intel_de_read(dev_priv, DC_STATE_EN);
- val &= ~DC_STATE_DC3CO_STATUS;
- intel_de_write(dev_priv, DC_STATE_EN, val);
+ intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/*
* Delay of 200us DC3CO Exit time B.Spec 49196
@@ -820,8 +809,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
/* Wa Display #1183: skl,kbl,cfl */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ 0, SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -847,8 +836,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
/* Wa Display #1183: skl,kbl,cfl */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ 0, SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
@@ -957,9 +946,10 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_cdclk_config cdclk_config = {};
- if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) {
+ if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
return;
}
@@ -998,10 +988,12 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+
if (!intel_dmc_has_payload(dev_priv))
return;
- switch (dev_priv->display.dmc.target_dc_state) {
+ switch (power_domains->target_dc_state) {
case DC_STATE_EN_DC3CO:
tgl_enable_dc3co(dev_priv);
break;
@@ -1033,9 +1025,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_A);
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_B);
}
@@ -1049,8 +1041,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE &&
+ intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
}
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1149,18 +1141,14 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
- u32 val;
-
/*
* On driver load, a pipe may be active and driving a DSI display.
* Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val &= DPOUNIT_CLOCK_GATE_DISABLE;
- val |= VRHUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/*
* Disable trickle feed and enable pnd deadline calculation
@@ -1276,8 +1264,7 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
+ intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST);
}
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1289,8 +1276,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, pipe);
/* Assert common reset */
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
+ intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0);
vlv_set_power_well(dev_priv, power_well, false);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
index 02605418ff08..755c1ea8225c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h
@@ -13,7 +13,7 @@
#define VLV_DISPLAY_BASE 0x180000
/*
- * Named helper wrappers around _PICK_EVEN() and _PICK().
+ * Named helper wrappers around _PICK_EVEN() and _PICK_EVEN_2RANGES().
*/
#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
@@ -29,12 +29,8 @@
#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
#define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b))
-#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
-
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
-#define _MMIO_PLL3(pll, ...) _MMIO(_PICK(pll, __VA_ARGS__))
+#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c))
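/*
 * Sketch of the _PICK_EVEN_2RANGES() semantics assumed here (see
 * i915_reg_defs.h for the real definition): indices below the boundary
 * argument (1) select from the first evenly-spaced range, the rest from
 * the second, so _PICK_EVEN_2RANGES(pipe, 1, a, a, b, c) yields a for
 * pipe 0, b for pipe 1 and c for pipe 2 - the same mapping as the
 * removed _PICK(pipe, a, b, c), but with the spacing of b and c checked
 * at build time instead of emitting a lookup array.
 */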
/*
* Device info offset array based helpers for groups of registers with unevenly
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
new file mode 100644
index 000000000000..918d0327169a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_vblank.h>
+
+#include "gt/intel_rps.h"
+#include "i915_drv.h"
+#include "intel_display_rps.h"
+#include "intel_display_types.h"
+
+struct wait_rps_boost {
+ struct wait_queue_entry wait;
+
+ struct drm_crtc *crtc;
+ struct i915_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+ unsigned mode, int sync, void *key)
+{
+ struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+ struct i915_request *rq = wait->request;
+
+ /*
+ * If we missed the vblank, but the request is already running it
+ * is reasonable to assume that it will complete before the next
+ * vblank without our intervention, so leave RPS alone.
+ */
+ if (!i915_request_started(rq))
+ intel_rps_boost(rq);
+ i915_request_put(rq);
+
+ drm_crtc_vblank_put(wait->crtc);
+
+ list_del(&wait->wait.entry);
+ kfree(wait);
+ return 1;
+}
+
+void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+ struct wait_rps_boost *wait;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ return;
+
+ if (drm_crtc_vblank_get(crtc))
+ return;
+
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ drm_crtc_vblank_put(crtc);
+ return;
+ }
+
+ wait->request = to_request(dma_fence_get(fence));
+ wait->crtc = crtc;
+
+ wait->wait.func = do_rps_boost;
+ wait->wait.flags = 0;
+
+ add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
+
+void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ bool interactive)
+{
+ if (state->rps_interactive == interactive)
+ return;
+
+ intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
+ state->rps_interactive = interactive;
+}
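/*
 * Hypothetical call site, sketched for illustration (caller and field
 * names assumed, not part of this patch): a plane commit path would
 * boost until the flip's fence has started executing, e.g.
 *
 *	if (new_plane_state->uapi.fence)
 *		intel_display_rps_boost_after_vblank(&crtc->base,
 *						     new_plane_state->uapi.fence);
 */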
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h
new file mode 100644
index 000000000000..e19009c2371a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_RPS_H__
+#define __INTEL_DISPLAY_RPS_H__
+
+#include <linux/types.h>
+
+struct dma_fence;
+struct drm_crtc;
+struct drm_i915_private;
+struct intel_atomic_state;
+
+void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence);
+void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ bool interactive);
+
+#endif /* __INTEL_DISPLAY_RPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 54c517ca9632..ab146b5b68bd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -43,7 +43,7 @@
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
-#include <drm/i915_mei_hdcp_interface.h>
+#include <drm/i915_hdcp_interface.h>
#include <media/cec-notifier.h>
#include "i915_vma.h"
@@ -53,7 +53,7 @@
#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_printer;
struct __intel_global_objs_state;
@@ -255,6 +255,11 @@ struct intel_encoder {
* Returns whether the port clock is enabled or not.
*/
bool (*is_clock_enabled)(struct intel_encoder *encoder);
+ /*
+ * Returns the PLL type the port uses.
+ */
+ enum icl_port_dpll_id (*port_pll_type)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
@@ -326,6 +331,7 @@ struct intel_vbt_panel_data {
struct {
u16 pwm_freq_hz;
u16 brightness_precision_bits;
+ u16 hdr_dpcd_refresh_timeout;
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
@@ -1249,6 +1255,9 @@ struct intel_crtc_state {
/* bitmask of planes that will be updated during the commit */
u8 update_planes;
+ /* bitmask of planes with async flip active */
+ u8 async_flip_planes;
+
u8 framestart_delay; /* 1-4 */
u8 msa_timing_delay; /* 0-3 */
@@ -1502,17 +1511,6 @@ struct intel_watermark_params {
u8 cacheline_size;
};
-struct cxsr_latency {
- bool is_desktop : 1;
- bool is_ddr3 : 1;
- u16 fsb_freq;
- u16 mem_freq;
- u16 display_sr;
- u16 display_hpll_disable;
- u16 cursor_sr;
- u16 cursor_hpll_disable;
-};
-
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
@@ -1631,6 +1629,8 @@ struct intel_psr {
bool psr2_sel_fetch_cff_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
@@ -1788,6 +1788,7 @@ struct intel_digital_port {
bool tc_legacy_port:1;
char tc_port_name[8];
enum tc_port_mode tc_mode;
+ enum tc_port_mode tc_init_mode;
enum phy_fia tc_phy_fia;
u8 tc_phy_fia_idx;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 257aa2b7cf20..8a88de67ff0a 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -38,6 +38,39 @@
* low-power state and comes back to normal.
*/
+enum intel_dmc_id {
+ DMC_FW_MAIN = 0,
+ DMC_FW_PIPEA,
+ DMC_FW_PIPEB,
+ DMC_FW_PIPEC,
+ DMC_FW_PIPED,
+ DMC_FW_MAX
+};
+
+struct intel_dmc {
+ struct drm_i915_private *i915;
+ struct work_struct work;
+ const char *fw_path;
+ u32 max_fw_size; /* bytes */
+ u32 version;
+ struct dmc_fw_info {
+ u32 mmio_count;
+ i915_reg_t mmioaddr[20];
+ u32 mmiodata[20];
+ u32 dmc_offset;
+ u32 start_mmioaddr;
+ u32 dmc_fw_size; /* dwords */
+ u32 *payload;
+ bool present;
+ } dmc_info[DMC_FW_MAX];
+};
+
+/* Note: This may be NULL. */
+static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
+{
+ return i915->display.dmc.dmc;
+}
+
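/*
 * Callers are expected to handle the NULL case, e.g. (sketch):
 *
 *	struct intel_dmc *dmc = i915_to_dmc(i915);
 *
 *	if (!dmc)
 *		return;
 */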
#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version) ((version) >> 16)
#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
@@ -56,10 +89,13 @@
__stringify(major) "_" \
__stringify(minor) ".bin"
+#define XELPDP_DMC_MAX_FW_SIZE 0x7000
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
-
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define MTL_DMC_PATH DMC_PATH(mtl)
+MODULE_FIRMWARE(MTL_DMC_PATH);
+
#define DG2_DMC_PATH DMC_LEGACY_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);
@@ -249,9 +285,19 @@ struct stepping_info {
char substepping;
};
-static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
+#define for_each_dmc_id(__dmc_id) \
+ for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++)
+
+static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
{
- return i915->display.dmc.dmc_info[dmc_id].payload;
+ return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
+}
+
+static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id)
+{
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ return dmc && dmc->dmc_info[dmc_id].payload;
}
bool intel_dmc_has_payload(struct drm_i915_private *i915)
@@ -270,12 +316,12 @@ intel_get_stepping_info(struct drm_i915_private *i915,
return si;
}
-static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915)
{
/* The bits below never need to be cleared afterwards */
- intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0,
+ intel_de_rmw(i915, DC_STATE_DEBUG, 0,
DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
- intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
+ intel_de_posting_read(i915, DC_STATE_DEBUG);
}
static void disable_event_handler(struct drm_i915_private *i915,
@@ -315,26 +361,23 @@ disable_flip_queue_event(struct drm_i915_private *i915,
}
static bool
-get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
+get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id,
i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
{
- switch (dmc_id) {
- case DMC_FW_MAIN:
+ if (dmc_id == DMC_FW_MAIN) {
if (DISPLAY_VER(i915) == 12) {
*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
*htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
return true;
}
- break;
- case DMC_FW_PIPEA ... DMC_FW_PIPED:
+ } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) {
if (IS_DG2(i915)) {
*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
*htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
return true;
}
- break;
}
return false;
@@ -343,13 +386,13 @@ get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
static void
disable_all_flip_queue_events(struct drm_i915_private *i915)
{
- int dmc_id;
+ enum intel_dmc_id dmc_id;
/* TODO: check if the following applies to all D13+ platforms. */
if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
return;
- for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
+ for_each_dmc_id(dmc_id) {
i915_reg_t ctl_reg;
i915_reg_t htp_reg;
@@ -365,34 +408,31 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
static void disable_all_event_handlers(struct drm_i915_private *i915)
{
- int id;
+ enum intel_dmc_id dmc_id;
/* TODO: disable the event handlers on pre-GEN12 platforms as well */
if (DISPLAY_VER(i915) < 12)
return;
- for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) {
+ for_each_dmc_id(dmc_id) {
int handler;
- if (!has_dmc_id_fw(i915, id))
+ if (!has_dmc_id_fw(i915, dmc_id))
continue;
for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
disable_event_handler(i915,
- DMC_EVT_CTL(i915, id, handler),
- DMC_EVT_HTP(i915, id, handler));
+ DMC_EVT_CTL(i915, dmc_id, handler),
+ DMC_EVT_HTP(i915, dmc_id, handler));
}
}
-static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
enum pipe pipe;
- if (DISPLAY_VER(i915) < 13)
- return;
-
/*
- * Wa_16015201720:adl-p,dg2, mtl
+ * Wa_16015201720:adl-p,dg2
* The WA requires clock gating to be disabled all the time
* for pipe A and B.
* For pipe C and D clock gating needs to be disabled only
@@ -408,9 +448,30 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
PIPEDMC_GATING_DIS, 0);
}
+static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
+{
+ /*
+ * Wa_16015201720
+ * The WA requires clock gating to be disabled all the time
+ * for pipe A and B.
+ */
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
+ MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
+}
+
+static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+{
+ if (DISPLAY_VER(i915) >= 14 && enable)
+ mtl_pipedmc_clock_gating_wa(i915);
+ else if (DISPLAY_VER(i915) == 13)
+ adlp_pipedmc_clock_gating_wa(i915, enable);
+}
+
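/*
 * Note the asymmetry in the dispatch above: on display version 14+ the
 * workaround is only ever applied on the enable path and never undone,
 * since mtl_pipedmc_clock_gating_wa() just sets the GATING_DIS bits,
 * while display version 13 keeps the enable/disable toggle of the
 * ADL-P variant.
 */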
void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
- if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
return;
if (DISPLAY_VER(i915) >= 14)
@@ -421,7 +482,9 @@ void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
- if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
+ enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
+
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
return;
if (DISPLAY_VER(i915) >= 14)
@@ -432,57 +495,59 @@ void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
/**
* intel_dmc_load_program() - write the firmware from memory to register.
- * @dev_priv: i915 drm device.
+ * @i915: i915 drm device.
*
* DMC firmware is read from a .bin file and kept in internal memory once.
* Every time the display comes back from a low power state this function is
* called to copy the firmware from internal memory to registers.
*/
-void intel_dmc_load_program(struct drm_i915_private *dev_priv)
+void intel_dmc_load_program(struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &dev_priv->display.dmc;
- u32 id, i;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+ enum intel_dmc_id dmc_id;
+ u32 i;
- if (!intel_dmc_has_payload(dev_priv))
+ if (!intel_dmc_has_payload(i915))
return;
- pipedmc_clock_gating_wa(dev_priv, true);
+ pipedmc_clock_gating_wa(i915, true);
- disable_all_event_handlers(dev_priv);
+ disable_all_event_handlers(i915);
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ assert_rpm_wakelock_held(&i915->runtime_pm);
preempt_disable();
- for (id = 0; id < DMC_FW_MAX; id++) {
- for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
- intel_de_write_fw(dev_priv,
- DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
- dmc->dmc_info[id].payload[i]);
+ for_each_dmc_id(dmc_id) {
+ for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
+ intel_de_write_fw(i915,
+ DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
+ dmc->dmc_info[dmc_id].payload[i]);
}
}
preempt_enable();
- for (id = 0; id < DMC_FW_MAX; id++) {
- for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) {
- intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i],
- dmc->dmc_info[id].mmiodata[i]);
+ for_each_dmc_id(dmc_id) {
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
+ dmc->dmc_info[dmc_id].mmiodata[i]);
}
}
- dev_priv->display.dmc.dc_state = 0;
+ power_domains->dc_state = 0;
- gen9_set_dc_state_debugmask(dev_priv);
+ gen9_set_dc_state_debugmask(i915);
/*
* Flip queue events need to be disabled before enabling DC5/6.
* i915 doesn't use the flip queue feature, so disable it already
* here.
*/
- disable_all_flip_queue_events(dev_priv);
+ disable_all_flip_queue_events(i915);
- pipedmc_clock_gating_wa(dev_priv, false);
+ pipedmc_clock_gating_wa(i915, false);
}
/**
@@ -504,8 +569,11 @@ void intel_dmc_disable_program(struct drm_i915_private *i915)
void assert_dmc_loaded(struct drm_i915_private *i915)
{
- drm_WARN_ONCE(&i915->drm,
- !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n");
+ drm_WARN_ONCE(&i915->drm, dmc &&
+ !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
"DMC program storage start is NULL\n");
drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
"DMC SSP Base Not fine\n");
@@ -540,15 +608,15 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
const struct stepping_info *si,
u8 package_ver)
{
- unsigned int i, id;
-
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
+ enum intel_dmc_id dmc_id;
+ unsigned int i;
for (i = 0; i < num_entries; i++) {
- id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
+ dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
- if (id >= DMC_FW_MAX) {
- drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id);
+ if (!is_valid_dmc_id(dmc_id)) {
+ drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id);
continue;
}
@@ -556,29 +624,24 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
* check for the stepping since we already found a previous FW
* for this id.
*/
- if (dmc->dmc_info[id].present)
+ if (dmc->dmc_info[dmc_id].present)
continue;
if (fw_info_matches_stepping(&fw_info[i], si)) {
- dmc->dmc_info[id].present = true;
- dmc->dmc_info[id].dmc_offset = fw_info[i].offset;
+ dmc->dmc_info[dmc_id].present = true;
+ dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset;
}
}
}
static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
const u32 *mmioaddr, u32 mmio_count,
- int header_ver, u8 dmc_id)
+ int header_ver, enum intel_dmc_id dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
u32 start_range, end_range;
int i;
- if (dmc_id >= DMC_FW_MAX) {
- drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
- return false;
- }
-
if (header_ver == 1) {
start_range = DMC_MMIO_START_RANGE;
end_range = DMC_MMIO_END_RANGE;
@@ -606,9 +669,9 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
- size_t rem_size, u8 dmc_id)
+ size_t rem_size, enum intel_dmc_id dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
@@ -719,7 +782,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
const struct stepping_info *si,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries;
const struct intel_fw_info *fw_info;
@@ -773,7 +836,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
struct intel_css_header *css_header,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct drm_i915_private *i915 = dmc->i915;
if (rem_size < sizeof(struct intel_css_header)) {
drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
@@ -793,18 +856,17 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
return sizeof(struct intel_css_header);
}
-static void parse_dmc_fw(struct drm_i915_private *dev_priv,
- const struct firmware *fw)
+static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
+ struct drm_i915_private *i915 = dmc->i915;
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
- struct intel_dmc *dmc = &dev_priv->display.dmc;
struct stepping_info display_info = { '*', '*'};
- const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
+ const struct stepping_info *si = intel_get_stepping_info(i915, &display_info);
+ enum intel_dmc_id dmc_id;
u32 readcount = 0;
u32 r, offset;
- int id;
if (!fw)
return;
@@ -825,34 +887,33 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
readcount += r;
- for (id = 0; id < DMC_FW_MAX; id++) {
- if (!dev_priv->display.dmc.dmc_info[id].present)
+ for_each_dmc_id(dmc_id) {
+ if (!dmc->dmc_info[dmc_id].present)
continue;
- offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
+ offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;
if (offset > fw->size) {
- drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
+ drm_err(&i915->drm, "Reading beyond the fw_size\n");
continue;
}
dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
- parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id);
+ parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
}
}
-static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
{
- drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
- dev_priv->display.dmc.wakeref =
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
+ i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
-static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
+static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&dev_priv->display.dmc.wakeref);
+ fetch_and_zero(&i915->display.dmc.wakeref);
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
}
static const char *dmc_fallback_path(struct drm_i915_private *i915)
@@ -865,46 +926,40 @@ static const char *dmc_fallback_path(struct drm_i915_private *i915)
static void dmc_load_work_fn(struct work_struct *work)
{
- struct drm_i915_private *dev_priv;
- struct intel_dmc *dmc;
+ struct intel_dmc *dmc = container_of(work, typeof(*dmc), work);
+ struct drm_i915_private *i915 = dmc->i915;
const struct firmware *fw = NULL;
const char *fallback_path;
int err;
- dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
- dmc = &dev_priv->display.dmc;
-
- err = request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
+ err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);
- if (err == -ENOENT && !dev_priv->params.dmc_firmware_path) {
- fallback_path = dmc_fallback_path(dev_priv);
+ if (err == -ENOENT && !i915->params.dmc_firmware_path) {
+ fallback_path = dmc_fallback_path(i915);
if (fallback_path) {
- drm_dbg_kms(&dev_priv->drm,
- "%s not found, falling back to %s\n",
- dmc->fw_path,
- fallback_path);
- err = request_firmware(&fw, fallback_path, dev_priv->drm.dev);
+ drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
+ dmc->fw_path, fallback_path);
+ err = request_firmware(&fw, fallback_path, i915->drm.dev);
if (err == 0)
- dev_priv->display.dmc.fw_path = fallback_path;
+ dmc->fw_path = fallback_path;
}
}
- parse_dmc_fw(dev_priv, fw);
+ parse_dmc_fw(dmc, fw);
- if (intel_dmc_has_payload(dev_priv)) {
- intel_dmc_load_program(dev_priv);
- intel_dmc_runtime_pm_put(dev_priv);
+ if (intel_dmc_has_payload(i915)) {
+ intel_dmc_load_program(i915);
+ intel_dmc_runtime_pm_put(i915);
- drm_info(&dev_priv->drm,
- "Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
+ drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
+ dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
DMC_VERSION_MINOR(dmc->version));
} else {
- drm_notice(&dev_priv->drm,
+ drm_notice(&i915->drm,
"Failed to load DMC firmware %s."
" Disabling runtime power management.\n",
dmc->fw_path);
- drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
+ drm_notice(&i915->drm, "DMC firmware homepage: %s",
INTEL_UC_FIRMWARE_URL);
}
@@ -912,19 +967,17 @@ static void dmc_load_work_fn(struct work_struct *work)
}
/**
- * intel_dmc_ucode_init() - initialize the firmware loading.
- * @dev_priv: i915 drm device.
+ * intel_dmc_init() - initialize the firmware loading.
+ * @i915: i915 drm device.
*
* This function is called at the time of loading the display driver to read
* the firmware from a .bin file and copy it into internal memory.
*/
-void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
+void intel_dmc_init(struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &dev_priv->display.dmc;
-
- INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);
+ struct intel_dmc *dmc;
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
/*
@@ -935,168 +988,195 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
* suspend as runtime suspend *requires* a working DMC for whatever
* reason.
*/
- intel_dmc_runtime_pm_get(dev_priv);
+ intel_dmc_runtime_pm_get(i915);
+
+ dmc = kzalloc(sizeof(*dmc), GFP_KERNEL);
+ if (!dmc)
+ return;
+
+ dmc->i915 = i915;
+
+ INIT_WORK(&dmc->work, dmc_load_work_fn);
- if (IS_DG2(dev_priv)) {
+ if (IS_METEORLAKE(i915)) {
+ dmc->fw_path = MTL_DMC_PATH;
+ dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
+ } else if (IS_DG2(i915)) {
dmc->fw_path = DG2_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(dev_priv)) {
+ } else if (IS_ALDERLAKE_P(i915)) {
dmc->fw_path = ADLP_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(dev_priv)) {
+ } else if (IS_ALDERLAKE_S(i915)) {
dmc->fw_path = ADLS_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(dev_priv)) {
+ } else if (IS_DG1(i915)) {
dmc->fw_path = DG1_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (IS_ROCKETLAKE(i915)) {
dmc->fw_path = RKL_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(dev_priv)) {
+ } else if (IS_TIGERLAKE(i915)) {
dmc->fw_path = TGL_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER(dev_priv) == 11) {
+ } else if (DISPLAY_VER(i915) == 11) {
dmc->fw_path = ICL_DMC_PATH;
dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(dev_priv)) {
+ } else if (IS_GEMINILAKE(i915)) {
dmc->fw_path = GLK_DMC_PATH;
dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv)) {
+ } else if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
dmc->fw_path = KBL_DMC_PATH;
dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(i915)) {
dmc->fw_path = SKL_DMC_PATH;
dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_BROXTON(i915)) {
dmc->fw_path = BXT_DMC_PATH;
dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
- if (dev_priv->params.dmc_firmware_path) {
- if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
- dmc->fw_path = NULL;
- drm_info(&dev_priv->drm,
+ if (i915->params.dmc_firmware_path) {
+ if (strlen(i915->params.dmc_firmware_path) == 0) {
+ drm_info(&i915->drm,
"Disabling DMC firmware and runtime PM\n");
- return;
+ goto out;
}
- dmc->fw_path = dev_priv->params.dmc_firmware_path;
+ dmc->fw_path = i915->params.dmc_firmware_path;
}
if (!dmc->fw_path) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"No known DMC firmware for platform, disabling runtime PM\n");
- return;
+ goto out;
}
- drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
- schedule_work(&dev_priv->display.dmc.work);
+ i915->display.dmc.dmc = dmc;
+
+ drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
+ schedule_work(&dmc->work);
+
+ return;
+
+out:
+ kfree(dmc);
}
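/*
 * Design note on the flow above: i915->display.dmc.dmc is only assigned
 * once every check has passed, so on any early-exit path the allocation
 * is freed and i915_to_dmc() keeps returning NULL for the rest of the
 * driver's lifetime.
 */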
/**
- * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend
- * @dev_priv: i915 drm device
+ * intel_dmc_suspend() - prepare DMC firmware before system suspend
+ * @i915: i915 drm device
*
* Prepare the DMC firmware before entering system suspend. This includes
* flushing pending work items and releasing any resources acquired during
* init.
*/
-void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
+void intel_dmc_suspend(struct drm_i915_private *i915)
{
- if (!HAS_DMC(dev_priv))
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+
+ if (!HAS_DMC(i915))
return;
- flush_work(&dev_priv->display.dmc.work);
+ if (dmc)
+ flush_work(&dmc->work);
/* Drop the reference held in case DMC isn't loaded. */
- if (!intel_dmc_has_payload(dev_priv))
- intel_dmc_runtime_pm_put(dev_priv);
+ if (!intel_dmc_has_payload(i915))
+ intel_dmc_runtime_pm_put(i915);
}
/**
- * intel_dmc_ucode_resume() - init DMC firmware during system resume
- * @dev_priv: i915 drm device
+ * intel_dmc_resume() - init DMC firmware during system resume
+ * @i915: i915 drm device
*
* Reinitialize the DMC firmware during system resume, reacquiring any
- * resources released in intel_dmc_ucode_suspend().
+ * resources released in intel_dmc_suspend().
*/
-void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
+void intel_dmc_resume(struct drm_i915_private *i915)
{
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
/*
* Reacquire the reference to keep RPM disabled in case DMC isn't
* loaded.
*/
- if (!intel_dmc_has_payload(dev_priv))
- intel_dmc_runtime_pm_get(dev_priv);
+ if (!intel_dmc_has_payload(i915))
+ intel_dmc_runtime_pm_get(i915);
}
/**
- * intel_dmc_ucode_fini() - unload the DMC firmware.
- * @dev_priv: i915 drm device.
+ * intel_dmc_fini() - unload the DMC firmware.
+ * @i915: i915 drm device.
*
* Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
-void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
+void intel_dmc_fini(struct drm_i915_private *i915)
{
- int id;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
+ enum intel_dmc_id dmc_id;
- if (!HAS_DMC(dev_priv))
+ if (!HAS_DMC(i915))
return;
- intel_dmc_ucode_suspend(dev_priv);
- drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
+ intel_dmc_suspend(i915);
+ drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
+
+ if (dmc) {
+ for_each_dmc_id(dmc_id)
+ kfree(dmc->dmc_info[dmc_id].payload);
- for (id = 0; id < DMC_FW_MAX; id++)
- kfree(dev_priv->display.dmc.dmc_info[id].payload);
+ kfree(dmc);
+ i915->display.dmc.dmc = NULL;
+ }
}
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &i915->display.dmc;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
if (!HAS_DMC(i915))
return;
+ i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
i915_error_printf(m, "DMC loaded: %s\n",
str_yes_no(intel_dmc_has_payload(i915)));
- i915_error_printf(m, "DMC fw version: %d.%d\n",
- DMC_VERSION_MAJOR(dmc->version),
- DMC_VERSION_MINOR(dmc->version));
+ if (dmc)
+ i915_error_printf(m, "DMC fw version: %d.%d\n",
+ DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
}
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = m->private;
+ struct intel_dmc *dmc = i915_to_dmc(i915);
intel_wakeref_t wakeref;
- struct intel_dmc *dmc;
i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
if (!HAS_DMC(i915))
return -ENODEV;
- dmc = &i915->display.dmc;
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
seq_printf(m, "fw loaded: %s\n",
str_yes_no(intel_dmc_has_payload(i915)));
- seq_printf(m, "path: %s\n", dmc->fw_path);
+ seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
seq_printf(m, "Pipe A fw needed: %s\n",
str_yes_no(GRAPHICS_VER(i915) >= 12));
seq_printf(m, "Pipe A fw loaded: %s\n",
- str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload));
+ str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
str_yes_no(IS_ALDERLAKE_P(i915) ||
DISPLAY_VER(i915) >= 14));
seq_printf(m, "Pipe B fw loaded: %s\n",
- str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload));
+ str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB)));
if (!intel_dmc_has_payload(i915))
goto out;
@@ -1130,9 +1210,10 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "DC5 -> DC6 count: %d\n",
intel_de_read(i915, dc6_reg));
-out:
seq_printf(m, "program base: 0x%08x\n",
intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
+
+out:
seq_printf(m, "ssp base: 0x%08x\n",
intel_de_read(i915, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index fd1725de4289..fd607afff2ef 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -6,54 +6,20 @@
#ifndef __INTEL_DMC_H__
#define __INTEL_DMC_H__
-#include "i915_reg_defs.h"
-#include "intel_wakeref.h"
-#include <linux/workqueue.h>
+#include <linux/types.h>
struct drm_i915_error_state_buf;
struct drm_i915_private;
-
enum pipe;
-enum {
- DMC_FW_MAIN = 0,
- DMC_FW_PIPEA,
- DMC_FW_PIPEB,
- DMC_FW_PIPEC,
- DMC_FW_PIPED,
- DMC_FW_MAX
-};
-
-struct intel_dmc {
- struct work_struct work;
- const char *fw_path;
- u32 max_fw_size; /* bytes */
- u32 version;
- struct dmc_fw_info {
- u32 mmio_count;
- i915_reg_t mmioaddr[20];
- u32 mmiodata[20];
- u32 dmc_offset;
- u32 start_mmioaddr;
- u32 dmc_fw_size; /*dwords */
- u32 *payload;
- bool present;
- } dmc_info[DMC_FW_MAX];
-
- u32 dc_state;
- u32 target_dc_state;
- u32 allowed_dc_mask;
- intel_wakeref_t wakeref;
-};
-
-void intel_dmc_ucode_init(struct drm_i915_private *i915);
+void intel_dmc_init(struct drm_i915_private *i915);
void intel_dmc_load_program(struct drm_i915_private *i915);
void intel_dmc_disable_program(struct drm_i915_private *i915);
void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe);
void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe);
-void intel_dmc_ucode_fini(struct drm_i915_private *i915);
-void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
-void intel_dmc_ucode_resume(struct drm_i915_private *i915);
+void intel_dmc_fini(struct drm_i915_private *i915);
+void intel_dmc_suspend(struct drm_i915_private *i915);
+void intel_dmc_resume(struct drm_i915_private *i915);
bool intel_dmc_has_payload(struct drm_i915_private *i915);
void intel_dmc_debugfs_register(struct drm_i915_private *i915);
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 62cbab7402e9..da1c00ee92fb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -288,7 +288,7 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
{
- int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base);
+ int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata);
int max_lanes = dig_port->max_lanes;
if (vbt_max_lanes)
@@ -425,7 +425,7 @@ static int vbt_max_link_rate(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int max_rate;
- max_rate = intel_bios_dp_max_link_rate(encoder);
+ max_rate = intel_bios_dp_max_link_rate(encoder->devdata);
if (intel_dp_is_edp(intel_dp)) {
struct intel_connector *connector = intel_dp->attached_connector;
@@ -687,6 +687,12 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
/* From XE_LPD onwards we support DSC bpps from bpc up to uncompressed bpp-1 */
if (DISPLAY_VER(i915) >= 13) {
bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
+
+ /*
+ * According to BSpec, 27 is the max DSC output bpp,
+ * 8 is the min DSC output bpp
+ */
+ bits_per_pixel = clamp_t(u32, bits_per_pixel, 8, 27);
} else {
/* Find the nearest match in the array of known BPPs from VESA */
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
@@ -716,9 +722,19 @@ u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
* (LinkSymbolClock)* 8 * (TimeSlots / 64)
* for SST -> TimeSlots is 64(i.e all TimeSlots that are available)
* for MST -> TimeSlots has to be calculated, based on mode requirements
+ *
+ * Due to FEC overhead, the available bw is reduced to 97.2261%.
+ * To support the given mode:
+ * Bandwidth required should be <= Available link Bandwidth * FEC Overhead
+ * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead
+ * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock
+ * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) /
+ * (ModeClock / FEC Overhead)
+ * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) /
+ * (ModeClock / FEC Overhead * 8)
*/
- bits_per_pixel = DIV_ROUND_UP((link_clock * lane_count) * timeslots,
- intel_dp_mode_to_fec_clock(mode_clock) * 8);
+ bits_per_pixel = ((link_clock * lane_count) * timeslots) /
+ (intel_dp_mode_to_fec_clock(mode_clock) * 8);
drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
"total bw %u pixel clock %u\n",
@@ -771,6 +787,13 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
min_slice_count = DIV_ROUND_UP(mode_clock,
DP_DSC_MAX_ENC_THROUGHPUT_1);
+ /*
+ * Due to some DSC engine BW limitations, we need to enable a second
+ * slice and VDSC engine whenever we approach close enough to max CDCLK
+ */
+ if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
+ min_slice_count = max_t(u8, min_slice_count, 2);
+
max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
drm_dbg_kms(&i915->drm,
@@ -1415,6 +1438,28 @@ static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp)
DP_DSC_MINOR_SHIFT;
}
+static int intel_dp_get_slice_height(int vactive)
+{
+ int slice_height;
+
+ /*
+ * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108
+ * lines is an optimal slice height, but any size can be used as long as
+ * vertical active integer multiple and maximum vertical slice count
+ * requirements are met.
+ */
+ for (slice_height = 108; slice_height <= vactive; slice_height += 2)
+ if (vactive % slice_height == 0)
+ return slice_height;
+
+ /*
+ * Highly unlikely that we reach here, as most resolutions will find an
+ * appropriate slice_height in the loop above; fall back to a
+ * slice_height of 2, which should work with all resolutions.
+ */
+ return 2;
+}
+
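
The new helper is self-contained enough to sanity-check by hand; a standalone copy with a few worked values (easy to verify, asserts are mine):

#include <assert.h>

/* Standalone copy of the slice height search above: smallest even
 * candidate >= 108 that evenly divides the vertical active, else 2. */
static int pick_slice_height(int vactive)
{
	int slice_height;

	for (slice_height = 108; slice_height <= vactive; slice_height += 2)
		if (vactive % slice_height == 0)
			return slice_height;

	return 2;
}

int main(void)
{
	assert(pick_slice_height(2160) == 108);	/* 2160 = 20 * 108 */
	assert(pick_slice_height(1080) == 108);	/* 1080 = 10 * 108 */
	assert(pick_slice_height(1440) == 120);	/* 108..118 don't divide */
	assert(pick_slice_height(1087) == 2);	/* odd vactive: fallback */
	return 0;
}
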
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@@ -1433,17 +1478,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
- /*
- * Slice Height of 8 works for all currently available panels. So start
- * with that if pic_height is an integral multiple of 8. Eventually add
- * logic to try multiple slice heights.
- */
- if (vdsc_cfg->pic_height % 8 == 0)
- vdsc_cfg->slice_height = 8;
- else if (vdsc_cfg->pic_height % 4 == 0)
- vdsc_cfg->slice_height = 4;
- else
- vdsc_cfg->slice_height = 2;
+ vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height);
ret = intel_dsc_compute_params(crtc_state);
if (ret)
@@ -1585,16 +1620,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
*/
- if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq ||
- pipe_config->bigjoiner_pipes) {
- if (pipe_config->dsc.slice_count > 1) {
- pipe_config->dsc.dsc_split = true;
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "Cannot split stream to use 2 VDSC instances\n");
- return -EINVAL;
- }
- }
+ if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1)
+ pipe_config->dsc.dsc_split = true;
ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
if (ret < 0) {
@@ -1727,7 +1754,7 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
* Our YCbCr output is always limited range.
* crtc_state->limited_color_range only applies to RGB,
* and it must never be set for YCbCr or we risk setting
- * some conflicting bits in PIPECONF which will mess up
+ * some conflicting bits in TRANSCONF which will mess up
* the colors on the monitor.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -1991,7 +2018,6 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
}
static bool intel_dp_has_audio(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
@@ -2057,7 +2083,7 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct drm_connector *connector = conn_state->connector;
pipe_config->sdp_split_enable =
- intel_dp_has_audio(encoder, pipe_config, conn_state) &&
+ intel_dp_has_audio(encoder, conn_state) &&
intel_dp_is_uhbr(pipe_config);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n",
@@ -2081,7 +2107,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->has_audio =
- intel_dp_has_audio(encoder, pipe_config, conn_state) &&
+ intel_dp_has_audio(encoder, conn_state) &&
intel_audio_compute_config(encoder, pipe_config, conn_state);
fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
@@ -2281,10 +2307,15 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
+ struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
- wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
+ connector->base.base.id, connector->base.name,
+ connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
+
+ wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
+ connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
}
/* If the device supports it, try to set the power state appropriately */
@@ -4851,7 +4882,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (!ret)
drm_dp_cec_register_connector(&intel_dp->aux, connector);
- if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return ret;
/*
@@ -5129,8 +5160,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
return IRQ_HANDLED;
}
-/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
+ const struct intel_bios_encoder_data *devdata,
+ enum port port)
{
/*
* eDP not supported on g4x, so bail out early just
@@ -5142,13 +5174,24 @@ bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
return true;
- return intel_bios_is_port_edp(dev_priv, port);
+ return devdata && intel_bios_encoder_supports_edp(devdata);
+}
+
+bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_bios_encoder_data *devdata =
+ intel_bios_encoder_data_lookup(i915, port);
+
+ return _intel_dp_is_port_edp(i915, devdata, port);
}
static bool
-has_gamut_metadata_dip(struct drm_i915_private *i915, enum port port)
+has_gamut_metadata_dip(struct intel_encoder *encoder)
{
- if (intel_bios_is_lspcon_present(i915, port))
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ if (intel_bios_encoder_is_lspcon(encoder->devdata))
return false;
if (DISPLAY_VER(i915) >= 11)
@@ -5183,14 +5226,14 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
drm_connector_attach_max_bpc_property(connector, 6, 12);
/* Register HDMI colorspace for case of lspcon */
- if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
drm_connector_attach_content_type_property(connector);
intel_attach_hdmi_colorspace_property(connector);
} else {
intel_attach_dp_colorspace_property(connector);
}
- if (has_gamut_metadata_dip(dev_priv, port))
+ if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
drm_connector_attach_hdr_output_metadata_property(connector);
if (HAS_VRR(dev_priv))
@@ -5232,11 +5275,6 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
-
- drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] using pipe %c for initial backlight setup\n",
- connector->base.base.id, connector->base.name,
- pipe_name(pipe));
}
intel_backlight_setup(connector, pipe);
@@ -5412,7 +5450,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_port_edp(dev_priv, port)) {
+ if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
/*
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 5a176bfb10a2..eb07dc5d8709 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
+#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
@@ -204,8 +205,19 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
- if (is_tc_port)
+ if (is_tc_port) {
intel_tc_port_lock(dig_port);
+ /*
+ * Abort transfers on a disconnected port as required by
+ * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
+ * timeouts that would otherwise happen.
+ * TODO: abort the transfer on non-TC ports as well.
+ */
+ if (!intel_tc_port_connected_locked(&dig_port->base)) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+ }
aux_domain = intel_aux_power_domain(dig_port);
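
The abort added above pairs with the new out_unlock label in the following hunk; a condensed, hypothetical sketch of the resulting control flow:

/* Condensed shape of the TC-port abort plus the out_unlock label;
 * the body of the real transfer is elided. */
static int aux_xfer_sketch(struct intel_digital_port *dig_port, bool is_tc_port)
{
	int ret;

	if (is_tc_port) {
		intel_tc_port_lock(dig_port);
		/* Sink gone: fail fast instead of a long AUX timeout */
		if (!intel_tc_port_connected_locked(&dig_port->base)) {
			ret = -ENXIO;
			goto out_unlock;
		}
	}

	ret = 0;	/* ... the actual AUX transfer happens here ... */

out_unlock:
	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}
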
@@ -366,7 +378,7 @@ out:
intel_pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
-
+out_unlock:
if (is_tc_port)
intel_tc_port_unlock(dig_port);
@@ -737,3 +749,37 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux.transfer = intel_dp_aux_transfer;
cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
+
+static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ /* SKL has DDI E but no AUX E */
+ if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E)
+ return AUX_CH_A;
+
+ return (enum aux_ch)encoder->port;
+}
+
+enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum aux_ch aux_ch;
+
+ aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
+ if (aux_ch != AUX_CH_NONE) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] using AUX %c (VBT)\n",
+ encoder->base.base.id, encoder->base.name,
+ aux_ch_name(aux_ch));
+ return aux_ch;
+ }
+
+ aux_ch = default_aux_ch(encoder);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] using AUX %c (platform default)\n",
+ encoder->base.base.id, encoder->base.name,
+ aux_ch_name(aux_ch));
+
+ return aux_ch;
+}
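
The selection order the two new functions implement is: VBT first, platform default second. A condensed sketch (enum values illustrative only, not the driver's):

/* Hypothetical condensed version of the lookup above: a VBT-specified
 * AUX channel wins; otherwise ports map 1:1 to AUX channels, except
 * SKL DDI E, which has no AUX E and borrows AUX A. */
enum aux_ch_sketch { SK_AUX_NONE = -1, SK_AUX_A = 0 };

static int pick_aux_ch(int vbt_aux_ch, int port, int is_skl_port_e)
{
	if (vbt_aux_ch != SK_AUX_NONE)
		return vbt_aux_ch;	/* VBT override */

	return is_skl_port_e ? SK_AUX_A : port;	/* platform default */
}
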
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 738577537bc7..138e340f94ee 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -6,9 +6,13 @@
#ifndef __INTEL_DP_AUX_H__
#define __INTEL_DP_AUX_H__
+enum aux_ch;
struct intel_dp;
+struct intel_encoder;
void intel_dp_aux_fini(struct intel_dp *intel_dp);
void intel_dp_aux_init(struct intel_dp *intel_dp);
+enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
+
#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 83af95bce98d..95cc5251843e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -105,6 +105,11 @@ enum intel_dp_aux_backlight_modparam {
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
};
+static bool is_intel_tcon_cap(const u8 tcon_cap[4])
+{
+ return tcon_cap[0] >= 1;
+}
+
/* Intel EDP backlight callbacks */
static bool
intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
@@ -125,14 +130,12 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP))
return false;
- if (tcon_cap[0] >= 1) {
- drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n",
- tcon_cap[0]);
- } else {
- drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n",
- tcon_cap[0]);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n",
+ connector->base.base.id, connector->base.name,
+ is_intel_tcon_cap(tcon_cap) ? "Intel" : "unsupported", tcon_cap[0]);
+
+ if (!is_intel_tcon_cap(tcon_cap))
return false;
- }
/*
* If we don't have HDR static metadata there is no way to
@@ -147,7 +150,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(&i915->drm,
- "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ connector->base.base.id, connector->base.name,
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
return false;
}
@@ -168,7 +172,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
u8 buf[2] = { 0 };
if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) {
- drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n");
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
@@ -185,7 +190,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
sizeof(buf)) != sizeof(buf)) {
- drm_err(&i915->drm, "Failed to read brightness from DPCD\n");
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n",
+ connector->base.base.id, connector->base.name);
return 0;
}
@@ -205,7 +211,8 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state,
if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
sizeof(buf)) != sizeof(buf))
- drm_err(dev, "Failed to write brightness level to DPCD\n");
+ drm_err(dev, "[CONNECTOR:%d:%s] Failed to write brightness level to DPCD\n",
+ connector->base.base.id, connector->base.name);
}
static void
@@ -238,7 +245,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
if (ret != 1) {
- drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return;
}
@@ -254,9 +262,10 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
}
- if (ctrl != old_ctrl)
- if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
- drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n");
+ if (ctrl != old_ctrl &&
+ drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
+ drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n",
+ connector->base.base.id, connector->base.name);
}
static void
@@ -273,6 +282,11 @@ intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state,
panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0));
}
+static const char *dpcd_vs_pwm_str(bool aux)
+{
+ return aux ? "DPCD" : "PWM";
+}
+
static int
intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
@@ -282,15 +296,16 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
&connector->base.display_info.luminance_range;
int ret;
- if (panel->backlight.edp.intel.sdr_uses_aux) {
- drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n");
- } else {
- drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.intel.sdr_uses_aux));
+ if (!panel->backlight.edp.intel.sdr_uses_aux) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
drm_err(&i915->drm,
- "Failed to setup SDR backlight controls through PWM: %d\n", ret);
+ "[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return ret;
}
}
@@ -303,8 +318,10 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
panel->backlight.min = 0;
}
- drm_dbg_kms(&i915->drm, "Using backlight range %d..%d\n", panel->backlight.min,
- panel->backlight.max);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n",
+ connector->base.base.id, connector->base.name,
+ panel->backlight.min, panel->backlight.max);
+
panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
panel->backlight.enabled = panel->backlight.level != 0;
@@ -386,12 +403,19 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
if (ret < 0)
return ret;
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ connector->base.base.id, connector->base.name,
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
+
if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
drm_err(&i915->drm,
- "Failed to setup PWM backlight controls for eDP backlight: %d\n",
- ret);
+ "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n",
+ connector->base.base.id, connector->base.name, ret);
return ret;
}
}
@@ -418,6 +442,9 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
}
}
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
@@ -428,7 +455,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
- drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n",
+ connector->base.base.id, connector->base.name);
return true;
}
return false;
@@ -504,13 +532,15 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
* interfaces is to probe for Intel's first, and VESA's second.
*/
if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
- drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n",
+ connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
return 0;
}
if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
- drm_dbg_kms(dev, "Using VESA eDP backlight controls\n");
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using VESA eDP backlight controls\n",
+ connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 3d3efcf02011..d638054c74ac 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -1379,10 +1379,6 @@ intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
}
}
- /* FIXME: Should DP_TRAINING_PATTERN_DISABLE be written first? */
- if (intel_dp->set_idle_link_train)
- intel_dp->set_idle_link_train(intel_dp, crtc_state);
-
return true;
}
@@ -1433,7 +1429,11 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool passed;
+
/*
* TODO: Reiniting LTTPRs here won't be needed once proper connector
* HW state readout is added.
@@ -1451,6 +1451,46 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
else
passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);
+ /*
+ * Ignore the link failure in CI
+ *
+ * In fixed environments like CI, the displays sometimes generate
+ * unexpected long HPDs. If the ignore_long_hpd flag is set, such long
+ * HPDs are ignored, and probably as a consequence of these ignored
+ * long HPDs, subsequent link trainings fail, resulting in CI
+ * execution failures.
+ *
+ * Test cases which rely on link training or HPD processing can unset
+ * the ignore_long_hpd flag from the testcase.
+ */
+ if (!passed && i915->display.hotplug.ignore_long_hpd) {
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Ignore the link failure\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name);
+ return;
+ }
+
if (!passed)
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}
+
+void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ /*
+ * VIDEO_DIP_CTL register bit 31 should be set to '0' so as not to
+ * disable SDP CRC. This is applicable for Display version 13.
+ * The default value of bit 31 is '0', hence the write is skipped.
+ * TODO: Corrective actions on SDP corruption yet to be defined
+ */
+ if (intel_dp_is_uhbr(crtc_state))
+ /* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_SDP_ERROR_DETECTION_CONFIGURATION,
+ DP_SDP_CRC16_128B132B_EN);
+
+ drm_dbg_kms(&i915->drm, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 7fa1c0833096..2c8f2775891b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -39,4 +39,6 @@ static inline u8 intel_dp_training_pattern_symbol(u8 pattern)
return pattern & ~DP_LINK_SCRAMBLING_DISABLE;
}
+void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_DP_LINK_TRAINING_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 054a009e800d..a860cbc5dbea 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -265,6 +265,19 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
return 0;
}
+static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ return connector->port->has_audio;
+ else
+ return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
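
The factored-out helper makes the audio decision explicit: honor a forced property value, otherwise follow the MST port capability. A sketch of the precedence (types and values simplified, not the driver's enum):

/* Hypothetical restatement of the precedence above: AUTO defers to the
 * sink-reported capability, anything else is the forced user value. */
enum force_audio_sketch { F_AUDIO_OFF = -1, F_AUDIO_AUTO = 0, F_AUDIO_ON = 1 };

static int mst_has_audio_sketch(enum force_audio_sketch force,
				int port_has_audio)
{
	if (force == F_AUDIO_AUTO)
		return port_has_audio;

	return force == F_AUDIO_ON;
}
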
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -272,10 +285,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct intel_digital_connector_state *intel_conn_state =
- to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
@@ -287,11 +296,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio = connector->port->has_audio;
- else
- pipe_config->has_audio =
- intel_conn_state->force_audio == HDMI_AUDIO_ON;
+ pipe_config->has_audio =
+ intel_dp_mst_has_audio(conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
/*
* for MST we always configure max link bw - the spec doesn't
@@ -604,7 +611,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* no clock to the transcoder"
*/
if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_mst->connector = NULL;
@@ -684,7 +691,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
* here for the following ones.
*/
if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
- intel_ddi_enable_pipe_clock(encoder, pipe_config);
+ intel_ddi_enable_transcoder_clock(encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 565c06de2432..62b93d097e44 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -389,9 +389,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
"force reprogramming it\n", phy);
}
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- val |= phy_info->pwron_mask;
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask);
/*
* The PHY registers start out inaccessible and respond to reads with
@@ -410,27 +408,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
phy);
/* Program PLL Rcomp code offset */
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy));
- val &= ~IREF0RC_OFFSET_MASK;
- val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK,
+ 0xE4 << IREF0RC_OFFSET_SHIFT);
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy));
- val &= ~IREF1RC_OFFSET_MASK;
- val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK,
+ 0xE4 << IREF1RC_OFFSET_SHIFT);
/* Program power gating */
- val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy));
- val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
- SUS_CLK_CONFIG;
- intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val);
-
- if (phy_info->dual_channel) {
- val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy));
- val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val);
- }
+ intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0,
+ OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG);
+
+ if (phy_info->dual_channel)
+ intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0,
+ DW6_OLDO_DYN_PWR_DOWN_EN);
if (phy_info->rcomp_phy != -1) {
u32 grc_code;
@@ -449,34 +439,25 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
val << GRC_CODE_SLOW_SHIFT |
val;
intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
-
- val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy));
- val |= GRC_DIS | GRC_RDY_OVRD;
- intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val);
+ intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy),
+ 0, GRC_DIS | GRC_RDY_OVRD);
}
if (phy_info->reset_delay)
udelay(phy_info->reset_delay);
- val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
- val |= COMMON_RESET_DIS;
- intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- u32 val;
phy_info = bxt_get_phy_info(dev_priv, phy);
- val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
- val &= ~COMMON_RESET_DIS;
- intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0);
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- val &= ~phy_info->pwron_mask;
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
}
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
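
Every hunk in this file follows the same mechanical conversion: an open-coded read-modify-write collapses into intel_de_rmw(dev_priv, reg, clear, set). A sketch of the semantics these conversions assume (later hunks, e.g. the DVO one that saves the return value into dpll[pipe], show the helper returning the pre-modification value):

/* Sketch of the read-modify-write helper these conversions rely on. */
static unsigned int rmw_sketch(unsigned int *reg, unsigned int clear,
			       unsigned int set)
{
	unsigned int old = *reg;	/* intel_de_read() */

	*reg = (old & ~clear) | set;	/* intel_de_write() */

	return old;	/* callers may inspect the value read back */
}

/*
 * Set bits:        rmw_sketch(reg, 0, bits);
 * Clear bits:      rmw_sketch(reg, bits, 0);
 * Update a field:  rmw_sketch(reg, FIELD_MASK, new_value);
 */
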
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 380368eff31a..22fc908b7e5d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -608,10 +608,8 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, WRPLL_CTL(id));
- intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, WRPLL_CTL(id));
/*
@@ -626,10 +624,8 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, SPLL_CTL);
- intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, SPLL_CTL);
/*
@@ -1238,16 +1234,10 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- u32 val;
- val = intel_de_read(dev_priv, DPLL_CTRL1);
-
- val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
- DPLL_CTRL1_SSC(id) |
- DPLL_CTRL1_LINK_RATE_MASK(id));
- val |= pll->state.hw_state.ctrl1 << (id * 6);
-
- intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_rmw(dev_priv, DPLL_CTRL1,
+ DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
+ pll->state.hw_state.ctrl1 << (id * 6));
intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
@@ -1265,8 +1255,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_posting_read(dev_priv, regs[id].cfgcr2);
/* the enable bit is always bit 31 */
- intel_de_write(dev_priv, regs[id].ctl,
- intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
@@ -1285,8 +1274,7 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
/* the enable bit is always bit 31 */
- intel_de_write(dev_priv, regs[id].ctl,
- intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+ intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, regs[id].ctl);
}
@@ -1902,14 +1890,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Non-SSC reference */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_REF_SEL;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
if (IS_GEMINILAKE(dev_priv)) {
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_POWER_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
+ 0, PORT_PLL_POWER_ENABLE);
if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
@@ -1918,39 +1903,28 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
}
/* Disable 10 bit clock */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
- temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
+ PORT_PLL_10BIT_CLK_ENABLE, 0);
/* Write P1 & P2 */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
- temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
- temp |= pll->state.hw_state.ebb0;
- intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
+ PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
/* Write M2 integer */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
- temp &= ~PORT_PLL_M2_INT_MASK;
- temp |= pll->state.hw_state.pll0;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
+ PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
/* Write N */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
- temp &= ~PORT_PLL_N_MASK;
- temp |= pll->state.hw_state.pll1;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
+ PORT_PLL_N_MASK, pll->state.hw_state.pll1);
/* Write M2 fraction */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
- temp &= ~PORT_PLL_M2_FRAC_MASK;
- temp |= pll->state.hw_state.pll2;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
+ PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
/* Write M2 fraction enable */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
- temp &= ~PORT_PLL_M2_FRAC_ENABLE;
- temp |= pll->state.hw_state.pll3;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
+ PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
/* Write coeff */
temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
@@ -1961,15 +1935,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
- temp &= ~PORT_PLL_TARGET_CNT_MASK;
- temp |= pll->state.hw_state.pll8;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
+ PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
- temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
- temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
- temp |= pll->state.hw_state.pll9;
- intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
+ PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
@@ -1986,9 +1956,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
@@ -2016,17 +1984,13 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
- u32 temp;
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (IS_GEMINILAKE(dev_priv)) {
- temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_POWER_ENABLE;
- intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_ENABLE, 0);
if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
@@ -3641,8 +3605,8 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
!i915_mmio_reg_valid(div0_reg));
if (dev_priv->display.vbt.override_afc_startup &&
i915_mmio_reg_valid(div0_reg))
- intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
- hw_state->div0);
+ intel_de_rmw(dev_priv, div0_reg,
+ TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
intel_de_posting_read(dev_priv, cfgcr1_reg);
}
@@ -3651,7 +3615,6 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
- u32 val;
/*
* Some of the following registers have reserved fields, so program
@@ -3659,23 +3622,19 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
* during the calc/readout phase if the mask depends on some other HW
* state like refclk, see icl_calc_mg_pll_state().
*/
- val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
- val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
- val |= hw_state->mg_refclkin_ctl;
- intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
+ intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
+ MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
- val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
- val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
- val |= hw_state->mg_clktop2_coreclkctl1;
- intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
+ hw_state->mg_clktop2_coreclkctl1);
- val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
- val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
- val |= hw_state->mg_clktop2_hsclkctl;
- intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
+ hw_state->mg_clktop2_hsclkctl);
intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
@@ -3684,15 +3643,12 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
hw_state->mg_pll_frac_lock);
intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
- val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
- val &= ~hw_state->mg_pll_bias_mask;
- val |= hw_state->mg_pll_bias;
- intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
+ intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
+ hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
- val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
- val |= hw_state->mg_pll_tdc_coldst_bias;
- intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
+ hw_state->mg_pll_tdc_coldst_bias_mask,
+ hw_state->mg_pll_tdc_coldst_bias);
intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
@@ -3766,11 +3722,7 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
- val = intel_de_read(dev_priv, enable_reg);
- val |= PLL_POWER_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);
/*
* The spec says we need to "wait" but it also says it should be
@@ -3785,11 +3737,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
- val = intel_de_read(dev_priv, enable_reg);
- val |= PLL_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
/* Timeout is actually 600us. */
if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
@@ -3815,8 +3763,7 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
* since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
*/
val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
- val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
- intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
+ val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
@@ -3900,8 +3847,6 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
{
- u32 val;
-
/* The first steps are done by intel_ddi_post_disable(). */
/*
@@ -3910,9 +3855,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* nothing here.
*/
- val = intel_de_read(dev_priv, enable_reg);
- val &= ~PLL_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);
/* Timeout is actually 1us. */
if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
@@ -3920,9 +3863,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
/* DVFS post sequence would be here. See the comment above. */
- val = intel_de_read(dev_priv, enable_reg);
- val &= ~PLL_POWER_ENABLE;
- intel_de_write(dev_priv, enable_reg, val);
+ intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);
/*
* The spec says we need to "wait" but it also says it should be
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 29c6421cd666..760e63cdc0c8 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -68,21 +68,15 @@ intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
- u32 val, bit;
+ u32 bit;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- bit = PIPECONF_REFRESH_RATE_ALT_VLV;
+ bit = TRANSCONF_REFRESH_RATE_ALT_VLV;
else
- bit = PIPECONF_REFRESH_RATE_ALT_ILK;
+ bit = TRANSCONF_REFRESH_RATE_ALT_ILK;
- val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
-
- if (refresh_rate == DRRS_REFRESH_RATE_LOW)
- val |= bit;
- else
- val &= ~bit;
-
- intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
+ intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder),
+ bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0);
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 96bc117fd6a0..19e422da57dc 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -88,7 +88,8 @@ static bool assert_dsb_has_room(struct intel_dsb *dsb)
/* each instruction is 2 dwords */
return !drm_WARN(&i915->drm, dsb->free_pos > dsb->size - 2,
- "DSB buffer overflow\n");
+ "[CRTC:%d:%s] DSB %d buffer overflow\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
}
static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
@@ -198,7 +199,7 @@ void intel_dsb_reg_write(struct intel_dsb *dsb,
}
}
-static u32 intel_dsb_align_tail(struct intel_dsb *dsb)
+static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
u32 aligned_tail, tail;
@@ -210,49 +211,58 @@ static u32 intel_dsb_align_tail(struct intel_dsb *dsb)
aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
+}
- return aligned_tail;
+void intel_dsb_finish(struct intel_dsb *dsb)
+{
+ intel_dsb_align_tail(dsb);
}
/**
* intel_dsb_commit() - Trigger workload execution of DSB.
* @dsb: DSB context
+ * @wait_for_vblank: wait for vblank before executing
*
* This function is used to do actual write to hardware using DSB.
*/
-void intel_dsb_commit(struct intel_dsb *dsb)
+void intel_dsb_commit(struct intel_dsb *dsb, bool wait_for_vblank)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 tail;
- tail = intel_dsb_align_tail(dsb);
- if (tail == 0)
+ tail = dsb->free_pos * 4;
+ if (drm_WARN_ON(&dev_priv->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
return;
if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
- drm_err(&dev_priv->drm, "DSB engine is busy.\n");
- goto reset;
+ drm_err(&dev_priv->drm, "[CRTC:%d:%s] DSB %d is busy\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
+ return;
}
intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id),
+ (wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0) |
DSB_ENABLE);
intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
i915_ggtt_offset(dsb->vma));
intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
i915_ggtt_offset(dsb->vma) + tail);
+}
- drm_dbg_kms(&dev_priv->drm,
- "DSB execution started - head 0x%x, tail 0x%x\n",
- i915_ggtt_offset(dsb->vma),
- i915_ggtt_offset(dsb->vma) + tail);
+void intel_dsb_wait(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1))
drm_err(&dev_priv->drm,
- "Timed out waiting for DSB workload completion.\n");
+ "[CRTC:%d:%s] DSB %d timed out waiting for idle\n",
+ crtc->base.base.id, crtc->base.name, dsb->id);
-reset:
+ /* Attempt to reset it */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), 0);
@@ -325,7 +335,8 @@ out_put_rpm:
kfree(dsb);
out:
drm_info_once(&i915->drm,
- "DSB queue setup failed, will fallback to MMIO for display HW programming\n");
+ "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
+ crtc->base.base.id, crtc->base.name, DSB1);
return NULL;
}
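
With the old commit path split into finish/commit/wait, a caller now drives the DSB in distinct phases. A kernel-context usage sketch under the API in these hunks (not lifted from any specific caller; reg and val stand in for real registers):

/* Hedged usage sketch of the reworked DSB API above. */
static void dsb_flow_sketch(struct intel_crtc *crtc, i915_reg_t reg, u32 val)
{
	struct intel_dsb *dsb = intel_dsb_prepare(crtc, 16);

	if (!dsb)
		return;	/* setup failed: caller falls back to MMIO */

	intel_dsb_reg_write(dsb, reg, val);	/* queue the writes */
	intel_dsb_finish(dsb);			/* pad tail to a cacheline */
	intel_dsb_commit(dsb, false);		/* start now, not at vblank */
	intel_dsb_wait(dsb);			/* wait idle; resets buffer */
	intel_dsb_cleanup(dsb);
}
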
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 05c221b6d0a4..b8148b47022d 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -15,9 +15,12 @@ struct intel_dsb;
struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc,
unsigned int max_cmds);
+void intel_dsb_finish(struct intel_dsb *dsb);
void intel_dsb_cleanup(struct intel_dsb *dsb);
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val);
-void intel_dsb_commit(struct intel_dsb *dsb);
+void intel_dsb_commit(struct intel_dsb *dsb,
+ bool wait_for_vblank);
+void intel_dsb_wait(struct intel_dsb *dsb);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 20e466d843ce..049443245310 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -162,6 +162,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
if (panel->vbt.backlight.brightness_precision_bits > 8)
@@ -171,6 +172,10 @@ static int dcs_setup_backlight(struct intel_connector *connector,
panel->backlight.level = panel->backlight.max;
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Using DCS for backlight control\n",
+ connector->base.base.id, connector->base.name);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 0be8105cb18a..eb2dcd866cc8 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -444,11 +444,8 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
* the clock enabled before we attempt to initialize
* the device.
*/
- for_each_pipe(dev_priv, pipe) {
- dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
- intel_de_write(dev_priv, DPLL(pipe),
- dpll[pipe] | DPLL_DVO_2X_MODE);
- }
+ for_each_pipe(dev_priv, pipe)
+ dpll[pipe] = intel_de_rmw(dev_priv, DPLL(pipe), 0, DPLL_DVO_2X_MODE);
ret = dvo->dev_ops->init(&intel_dvo->dev, i2c);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 93d0e46e5481..799bdc81a6a9 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -2007,6 +2007,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
vm = intel_dpt_create(intel_fb);
if (IS_ERR(vm)) {
+ drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n");
ret = PTR_ERR(vm);
goto err;
}
@@ -2017,11 +2018,14 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
if (ret) {
drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
- goto err;
+ goto err_free_dpt;
}
return 0;
+err_free_dpt:
+ if (intel_fb_uses_dpt(fb))
+ intel_dpt_destroy(intel_fb->dpt_vm);
err:
intel_frontbuffer_put(intel_fb->frontbuffer);
return ret;
@@ -2046,6 +2050,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
/* object is "remote", not in local memory */
i915_gem_object_put(obj);
+ drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
return ERR_PTR(-EREMOTE);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index f76b06293eb9..673bcdfb7ff6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -210,6 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
bool prealloc = false;
void __iomem *vaddr;
struct drm_i915_gem_object *obj;
+ struct i915_gem_ww_ctx ww;
int ret;
mutex_lock(&ifbdev->hpd_lock);
@@ -283,13 +284,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fix.smem_len = vma->size;
}
- vaddr = i915_vma_pin_iomap(vma);
- if (IS_ERR(vaddr)) {
- drm_err(&dev_priv->drm,
- "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
- ret = PTR_ERR(vaddr);
- goto out_unpin;
+ for_i915_gem_ww(&ww, ret, false) {
+ ret = i915_gem_object_lock(vma->obj, &ww);
+
+ if (ret)
+ continue;
+
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
+ drm_err(&dev_priv->drm,
+ "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
+ ret = PTR_ERR(vaddr);
+ continue;
+ }
}
+
+ if (ret)
+ goto out_unpin;
+
info->screen_base = vaddr;
info->screen_size = vma->size;
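
The for_i915_gem_ww() loop above encodes the standard ww-mutex backoff: on -EDEADLK the context backs off and the body re-runs. A pared-down sketch of the pattern (do_pin_work() is hypothetical):

/* Sketch of the retry pattern used above: the macro re-runs the body
 * after a ww backoff whenever ret comes back as -EDEADLK. */
for_i915_gem_ww(&ww, ret, false) {
	ret = i915_gem_object_lock(vma->obj, &ww);
	if (ret)
		continue;	/* -EDEADLK: macro backs off and retries */

	ret = do_pin_work(vma);	/* hypothetical; may also hit -EDEADLK */
}
if (ret)
	goto out_unpin;
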
@@ -561,9 +573,9 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
}
-void intel_fbdev_initial_config_async(struct drm_device *dev)
+void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
@@ -706,9 +718,9 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
-void intel_fbdev_restore_mode(struct drm_device *dev)
+void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 0e95e9472fa3..04fd523a5023 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -15,12 +15,12 @@ struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int intel_fbdev_init(struct drm_device *dev);
-void intel_fbdev_initial_config_async(struct drm_device *dev);
+void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv);
void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
void intel_fbdev_fini(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
void intel_fbdev_output_poll_changed(struct drm_device *dev);
-void intel_fbdev_restore_mode(struct drm_device *dev);
+void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
#else
static inline int intel_fbdev_init(struct drm_device *dev)
@@ -28,7 +28,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
return 0;
}
-static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
+static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv)
{
}
@@ -48,7 +48,7 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
-static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915)
{
}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 063f1da4f229..c08c26a321b3 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -366,8 +366,7 @@ void intel_fdi_normal_train(struct intel_crtc *crtc)
/* IVB wants error correction enabled */
if (IS_IVYBRIDGE(dev_priv))
- intel_de_write(dev_priv, reg,
- intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+ intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -439,19 +438,11 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
/* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(150);
reg = FDI_RX_IIR(pipe);
@@ -538,13 +529,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
udelay(150);
for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
@@ -593,13 +580,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
udelay(150);
for (i = 0; i < 4; i++) {
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
@@ -719,19 +702,13 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
}
/* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- intel_de_write(dev_priv, reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- intel_de_write(dev_priv, reg, temp);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE_IVB,
+ FDI_LINK_TRAIN_PATTERN_2_IVB);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ FDI_LINK_TRAIN_PATTERN_MASK_CPT,
+ FDI_LINK_TRAIN_PATTERN_2_CPT);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(2); /* should be 1.5us */
for (i = 0; i < 4; i++) {
@@ -837,9 +814,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
udelay(30);
/* Unset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
/* Wait for FDI auto training time */
@@ -865,25 +841,19 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
- temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
- temp &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
- temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+ intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
/* Reset FDI_RX_MISC pwrdn lanes */
- temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
}
@@ -898,7 +868,6 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
void hsw_fdi_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
@@ -906,30 +875,15 @@ void hsw_fdi_disable(struct intel_encoder *encoder)
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-
- val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
- val &= ~DDI_BUF_CTL_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val);
-
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
+ intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
-
intel_ddi_disable_clock(encoder);
-
- val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
- val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_PCDCLK;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_PLL_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
+ intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
@@ -945,16 +899,14 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
temp = intel_de_read(dev_priv, reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
intel_de_posting_read(dev_priv, reg);
udelay(200);
/* Switch from Rawclk to PCDclk */
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
-
+ intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
intel_de_posting_read(dev_priv, reg);
udelay(200);
@@ -974,28 +926,18 @@ void ilk_fdi_pll_disable(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 temp;
/* Switch from PCDclk to Rawclk */
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
/* Disable CPU FDI TX PLL */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
-
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
udelay(100);
- reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
-
/* Wait for the clocks to turn off. */
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
udelay(100);
}
@@ -1007,15 +949,13 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
u32 temp;
/* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
+ intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(dev_priv, reg);
temp &= ~(0x7 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
intel_de_posting_read(dev_priv, reg);
@@ -1027,11 +967,8 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(dev_priv, reg);
@@ -1042,9 +979,9 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- /* BPC in FDI rx is consistent with that in PIPECONF */
+ /* BPC in FDI rx is consistent with that in TRANSCONF */
temp &= ~(0x07 << 16);
- temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
intel_de_write(dev_priv, reg, temp);
intel_de_posting_read(dev_priv, reg);
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index d636d21fa9ce..b708a62e509a 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -31,6 +31,7 @@
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fifo_underrun.h"
+#include "intel_pch_display.h"
/**
* DOC: fifo underrun handling
@@ -509,3 +510,22 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
+
+void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915,
+ struct intel_crtc *crtc,
+ bool enable)
+{
+ crtc->cpu_fifo_underrun_disabled = !enable;
+
+ /*
+ * We track the PCH transcoder underrun reporting state
+ * within the crtc: the crtc for pipe A houses the underrun
+ * reporting state for PCH transcoder A, the crtc for pipe B
+ * houses it for PCH transcoder B, etc. LPT-H has only PCH
+ * transcoder A, and marking underrun reporting as disabled for
+ * the non-existent PCH transcoders B and C would prevent
+ * enabling the south error interrupt (see cpt_can_enable_serr_int()).
+ */
+ if (intel_has_pch_trancoder(i915, crtc->pipe))
+ crtc->pch_fifo_underrun_disabled = !enable;
+}
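For reference, the check the comment points at, sketched from the behavior described above rather than copied verbatim from the tree: it refuses to enable the south error interrupt while any PCH transcoder still has underrun reporting disabled.

    static bool cpt_can_enable_serr_int(struct drm_device *dev)
    {
            struct drm_i915_private *dev_priv = to_i915(dev);
            enum pipe pipe;
            struct intel_crtc *crtc;

            lockdep_assert_held(&dev_priv->irq_lock);

            for_each_pipe(dev_priv, pipe) {
                    crtc = intel_crtc_for_pipe(dev_priv, pipe);

                    if (crtc->pch_fifo_underrun_disabled)
                            return false;
            }

            return true;
    }

This is why leaving pch_fifo_underrun_disabled set for transcoders that do not exist on LPT-H would permanently veto SERR.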
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
index 2e47d7d3c101..b00d8abebcf9 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
@@ -9,8 +9,11 @@
#include <linux/types.h>
struct drm_i915_private;
+struct intel_crtc;
enum pipe;
+void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915,
+ struct intel_crtc *crtc, bool enable);
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 0bc4f6b48e80..3ddfc8080ee8 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -215,41 +215,23 @@ intel_gmbus_reset(struct drm_i915_private *i915)
static void pnv_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- val = intel_de_read(i915, DSPCLK_GATE_D(i915));
- if (!enable)
- val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- else
- val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(i915, DSPCLK_GATE_D(i915), val);
+ intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE,
+ !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
static void pch_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
- val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D);
- if (!enable)
- val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- else
- val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val);
+ intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE,
+ !enable ? PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
- u32 val;
-
- val = intel_de_read(i915, GEN9_CLKGATE_DIS_4);
- if (!enable)
- val |= BXT_GMBUS_GATING_DIS;
- else
- val &= ~BXT_GMBUS_GATING_DIS;
- intel_de_write(i915, GEN9_CLKGATE_DIS_4, val);
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS,
+ !enable ? BXT_GMBUS_GATING_DIS : 0);
}
static u32 get_reserved(struct intel_gmbus *bus)
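The three helpers above now differ only in the register and the gating bit. A hypothetical consolidation (not part of this patch) makes the shared shape explicit: the "clock gate disable" bit must be set exactly while gating is being turned off, i.e. while bit-banging I2C.

    static void gmbus_clock_gating(struct drm_i915_private *i915,
                                   i915_reg_t reg, u32 gate_dis_bit,
                                   bool enable)
    {
            /* gating disabled => the GATE_DISABLE bit must be set */
            intel_de_rmw(i915, reg, gate_dis_bit,
                         enable ? 0 : gate_dis_bit);
    }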
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 6406fd487ee5..650232c4892b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -23,6 +23,7 @@
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_gsc.h"
#include "intel_hdcp_regs.h"
#include "intel_pcode.h"
@@ -203,13 +204,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct intel_gt *gt = dev_priv->media_gt;
+ struct intel_gsc_uc *gsc = &gt->uc.gsc;
bool capable = false;
/* I915 support for HDCP2.2 */
if (!hdcp->hdcp2_supported)
return false;
- /* MEI interface is solid */
+ /* On MTL+, make sure the GSC firmware is loaded and the proxy is set up */
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ if (!intel_uc_fw_is_running(&gsc->fw))
+ return false;
+
+ /* The MEI/GSC interface (whichever is in use) is solid */
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -943,8 +951,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
port);
- intel_de_write(dev_priv, HDCP_REP_CTL,
- intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
+ intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0);
ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
if (ret) {
@@ -1143,18 +1150,18 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
+ ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
if (ret)
drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
ret);
@@ -1173,18 +1180,18 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
+ ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
rx_cert, paired,
ek_pub_km, msg_sz);
if (ret < 0)
@@ -1201,18 +1208,18 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
+ ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -1227,18 +1234,18 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
+ ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
ret);
@@ -1254,18 +1261,18 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
+ ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
ret);
@@ -1281,18 +1288,18 @@ hdcp2_verify_lprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
+ ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
ret);
@@ -1307,18 +1314,18 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
+ ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
ret);
@@ -1336,20 +1343,21 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
- rep_topology,
- rep_send_ack);
+ ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
+ data,
+ rep_topology,
+ rep_send_ack);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm,
"Verify rep topology failed. %d\n", ret);
@@ -1365,18 +1373,18 @@ hdcp2_verify_mprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
+ ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -1389,18 +1397,18 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
+ ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
ret);
@@ -1409,22 +1417,22 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
return ret;
}
-static int hdcp2_close_mei_session(struct intel_connector *connector)
+static int hdcp2_close_session(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct i915_hdcp_comp_master *comp;
+ struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- comp = dev_priv->display.hdcp.master;
+ arbiter = dev_priv->display.hdcp.master;
- if (!comp || !comp->ops) {
+ if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
- ret = comp->ops->close_hdcp_session(comp->mei_dev,
+ ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
&dig_port->hdcp_port_data);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@@ -1433,7 +1441,7 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
- return hdcp2_close_mei_session(connector);
+ return hdcp2_close_session(connector);
}
/* Authentication flow starts from here */
@@ -1819,12 +1827,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
}
if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_AUTH_STATUS) {
+ LINK_AUTH_STATUS)
/* Link is Authenticated. Now set for Encryption */
- intel_de_write(dev_priv,
- HDCP2_CTL(dev_priv, cpu_transcoder, port),
- intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
- }
+ intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ 0, CTL_LINK_ENCRYPTION_REQ);
ret = intel_de_wait_for_set(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -1848,8 +1854,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS));
- intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
- intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
+ intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ CTL_LINK_ENCRYPTION_REQ, 0);
ret = intel_de_wait_for_clear(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -2145,8 +2151,8 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
- dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data;
- dev_priv->display.hdcp.master->mei_dev = mei_kdev;
+ dev_priv->display.hdcp.master = (struct i915_hdcp_master *)data;
+ dev_priv->display.hdcp.master->hdcp_dev = mei_kdev;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return 0;
@@ -2163,30 +2169,30 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
}
-static const struct component_ops i915_hdcp_component_ops = {
+static const struct component_ops i915_hdcp_ops = {
.bind = i915_hdcp_component_bind,
.unbind = i915_hdcp_component_unbind,
};
-static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
+static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
{
switch (port) {
case PORT_A:
- return MEI_DDI_A;
+ return HDCP_DDI_A;
case PORT_B ... PORT_F:
- return (enum mei_fw_ddi)port;
+ return (enum hdcp_ddi)port;
default:
- return MEI_DDI_INVALID_PORT;
+ return HDCP_DDI_INVALID_PORT;
}
}
-static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
{
switch (cpu_transcoder) {
case TRANSCODER_A ... TRANSCODER_D:
- return (enum mei_fw_tc)(cpu_transcoder | 0x10);
+ return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
default: /* eDP, DSI TRANSCODERS are non HDCP capable */
- return MEI_INVALID_TRANSCODER;
+ return HDCP_INVALID_TRANSCODER;
}
}
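Worked out with the usual i915 enum values (TRANSCODER_A == 0 with the transcoders numbered consecutively, an assumption of this note), the | 0x10 encoding maps TRANSCODER_A to 0x10, TRANSCODER_B to 0x11, TRANSCODER_C to 0x12 and TRANSCODER_D to 0x13, matching the firmware's HDCP_TRANSCODER_A through HDCP_TRANSCODER_D values.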
@@ -2200,20 +2206,20 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
enum port port = dig_port->base.port;
if (DISPLAY_VER(dev_priv) < 12)
- data->fw_ddi = intel_get_mei_fw_ddi_index(port);
+ data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
else
/*
- * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
+ * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
* with zero (INVALID PORT index).
*/
- data->fw_ddi = MEI_DDI_INVALID_PORT;
+ data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
/*
- * As associated transcoder is set and modified at modeset, here fw_tc
+ * As associated transcoder is set and modified at modeset, here hdcp_transcoder
* is initialized to zero (invalid transcoder index). This will be
* retained for <Gen12 forever.
*/
- data->fw_tc = MEI_INVALID_TRANSCODER;
+ data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
@@ -2235,6 +2241,9 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
{
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ return true;
+
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
return false;
@@ -2256,10 +2265,14 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
dev_priv->display.hdcp.comp_added = true;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
- ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
- I915_COMPONENT_HDCP);
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ ret = intel_hdcp_gsc_init(dev_priv);
+ else
+ ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_ops,
+ I915_COMPONENT_HDCP);
+
if (ret < 0) {
- drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
+ drm_dbg_kms(&dev_priv->drm, "Failed at fw component add(%d)\n",
ret);
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
dev_priv->display.hdcp.comp_added = false;
@@ -2350,7 +2363,8 @@ int intel_hdcp_enable(struct intel_connector *connector,
}
if (DISPLAY_VER(dev_priv) >= 12)
- dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
+ dig_port->hdcp_port_data.hdcp_transcoder =
+ intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -2485,7 +2499,10 @@ void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
dev_priv->display.hdcp.comp_added = false;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
- component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
+ if (intel_hdcp_gsc_cs_required(dev_priv))
+ intel_hdcp_gsc_fini(dev_priv);
+ else
+ component_del(dev_priv->drm.dev, &i915_hdcp_ops);
}
void intel_hdcp_cleanup(struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
new file mode 100644
index 000000000000..7e52aea6aa17
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023, Intel Corporation.
+ */
+
+#include <drm/i915_hdcp_interface.h>
+
+#include "display/intel_hdcp_gsc.h"
+#include "gem/i915_gem_region.h"
+#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
+#include "i915_drv.h"
+#include "i915_utils.h"
+
+bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 14;
+}
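DISPLAY_VER(i915) >= 14 corresponds to Meteor Lake and newer, the "MTL+" that the callers in intel_hdcp.c test for: on those platforms the HDCP 2.x firmware sits behind the GSC command streamer rather than the MEI bus, so everything below routes through intel_hdcp_gsc_msg_send().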
+
+static int
+gsc_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_init *ake_data)
+{
+ struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } };
+ struct wired_cmd_initiate_hdcp2_session_out
+ session_init_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !ake_data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ session_init_in.header.api_version = HDCP_API_VERSION;
+ session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
+ session_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ session_init_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
+
+ session_init_in.port.integrated_port_type = data->port_type;
+ session_init_in.port.physical_port = (u8)data->hdcp_ddi;
+ session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+ session_init_in.protocol = data->protocol;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in,
+ sizeof(session_init_in),
+ (u8 *)&session_init_out,
+ sizeof(session_init_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_INITIATE_HDCP2_SESSION,
+ session_init_out.header.status);
+ return -EIO;
+ }
+
+ ake_data->msg_id = HDCP_2_2_AKE_INIT;
+ ake_data->tx_caps = session_init_out.tx_caps;
+ memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN);
+
+ return 0;
+}
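Every gsc_hdcp_* op below repeats the same header and port setup before calling intel_hdcp_gsc_msg_send(). A hypothetical helper, not in this patch, showing the repeated part (the hdcp_cmd_header and hdcp_port_id type names are assumed from the i915_hdcp_interface.h layout this file includes):

    static void gsc_hdcp_fill_cmd(struct hdcp_cmd_header *header,
                                  struct hdcp_port_id *port,
                                  u32 command_id, u32 buffer_len,
                                  const struct hdcp_port_data *data)
    {
            header->api_version = HDCP_API_VERSION;
            header->command_id = command_id;
            header->status = FW_HDCP_STATUS_SUCCESS;
            header->buffer_len = buffer_len;

            port->integrated_port_type = data->port_type;
            port->physical_port = (u8)data->hdcp_ddi;
            port->attached_transcoder = (u8)data->hdcp_transcoder;
    }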
+
+static int
+gsc_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_cert *rx_cert,
+ bool *km_stored,
+ struct hdcp2_ake_no_stored_km
+ *ek_pub_km,
+ size_t *msg_sz)
+{
+ struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } };
+ struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ verify_rxcert_in.header.api_version = HDCP_API_VERSION;
+ verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
+ verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_rxcert_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
+
+ verify_rxcert_in.port.integrated_port_type = data->port_type;
+ verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ verify_rxcert_in.cert_rx = rx_cert->cert_rx;
+ memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
+ memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in,
+ sizeof(verify_rxcert_in),
+ (u8 *)&verify_rxcert_out,
+ sizeof(verify_rxcert_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_VERIFY_RECEIVER_CERT,
+ verify_rxcert_out.header.status);
+ return -EIO;
+ }
+
+ *km_stored = !!verify_rxcert_out.km_stored;
+ if (verify_rxcert_out.km_stored) {
+ ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM;
+ *msg_sz = sizeof(struct hdcp2_ake_stored_km);
+ } else {
+ ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM;
+ *msg_sz = sizeof(struct hdcp2_ake_no_stored_km);
+ }
+
+ memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff,
+ sizeof(verify_rxcert_out.ekm_buff));
+
+ return 0;
+}
+
+static int
+gsc_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_send_hprime *rx_hprime)
+{
+ struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } };
+ struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_hprime)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ send_hprime_in.header.api_version = HDCP_API_VERSION;
+ send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
+ send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
+
+ send_hprime_in.port.integrated_port_type = data->port_type;
+ send_hprime_in.port.physical_port = (u8)data->hdcp_ddi;
+ send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
+ HDCP_2_2_H_PRIME_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in,
+ sizeof(send_hprime_in),
+ (u8 *)&send_hprime_out,
+ sizeof(send_hprime_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+gsc_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+ struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } };
+ struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !pairing_info)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ pairing_info_in.header.api_version = HDCP_API_VERSION;
+ pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
+ pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ pairing_info_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
+
+ pairing_info_in.port.integrated_port_type = data->port_type;
+ pairing_info_in.port.physical_port = (u8)data->hdcp_ddi;
+ pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
+ HDCP_2_2_E_KH_KM_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in,
+ sizeof(pairing_info_in),
+ (u8 *)&pairing_info_out,
+ sizeof(pairing_info_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. Status: 0x%X\n",
+ WIRED_AKE_SEND_PAIRING_INFO,
+ pairing_info_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+gsc_hdcp_initiate_locality_check(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_init *lc_init_data)
+{
+ struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } };
+ struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !lc_init_data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ lc_init_in.header.api_version = HDCP_API_VERSION;
+ lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
+ lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
+
+ lc_init_in.port.integrated_port_type = data->port_type;
+ lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
+ lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in),
+ (u8 *)&lc_init_out, sizeof(lc_init_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n",
+ WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
+ return -EIO;
+ }
+
+ lc_init_data->msg_id = HDCP_2_2_LC_INIT;
+ memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN);
+
+ return 0;
+}
+
+static int
+gsc_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
+ struct hdcp2_lc_send_lprime *rx_lprime)
+{
+ struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } };
+ struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !rx_lprime)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ verify_lprime_in.header.api_version = HDCP_API_VERSION;
+ verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
+ verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_lprime_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
+
+ verify_lprime_in.port.integrated_port_type = data->port_type;
+ verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
+ HDCP_2_2_L_PRIME_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in,
+ sizeof(verify_lprime_in),
+ (u8 *)&verify_lprime_out,
+ sizeof(verify_lprime_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_VALIDATE_LOCALITY,
+ verify_lprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int gsc_hdcp_get_session_key(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data)
+{
+ struct wired_cmd_get_session_key_in get_skey_in = { { 0 } };
+ struct wired_cmd_get_session_key_out get_skey_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data || !ske_data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ get_skey_in.header.api_version = HDCP_API_VERSION;
+ get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
+ get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
+
+ get_skey_in.port.integrated_port_type = data->port_type;
+ get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
+ get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in),
+ (u8 *)&get_skey_out, sizeof(get_skey_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_GET_SESSION_KEY, get_skey_out.header.status);
+ return -EIO;
+ }
+
+ ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS;
+ memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks,
+ HDCP_2_2_E_DKEY_KS_LEN);
+ memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN);
+
+ return 0;
+}
+
+static int
+gsc_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack
+ *rep_send_ack)
+{
+ struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } };
+ struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !rep_topology || !rep_send_ack || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ verify_repeater_in.header.api_version = HDCP_API_VERSION;
+ verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
+ verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_repeater_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
+
+ verify_repeater_in.port.integrated_port_type = data->port_type;
+ verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi;
+ verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
+ HDCP_2_2_RXINFO_LEN);
+ memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v,
+ HDCP_2_2_SEQ_NUM_LEN);
+ memcpy(verify_repeater_in.v_prime, rep_topology->v_prime,
+ HDCP_2_2_V_PRIME_HALF_LEN);
+ memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
+ HDCP_2_2_RECEIVER_IDS_MAX_LEN);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in,
+ sizeof(verify_repeater_in),
+ (u8 *)&verify_repeater_out,
+ sizeof(verify_repeater_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_VERIFY_REPEATER,
+ verify_repeater_out.header.status);
+ return -EIO;
+ }
+
+ memcpy(rep_send_ack->v, verify_repeater_out.v,
+ HDCP_2_2_V_PRIME_HALF_LEN);
+ rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK;
+
+ return 0;
+}
+
+static int gsc_hdcp_verify_mprime(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready)
+{
+ struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
+ struct wired_cmd_repeater_auth_stream_req_out
+ verify_mprime_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+ size_t cmd_size;
+
+ if (!dev || !stream_ready || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ cmd_size = struct_size(verify_mprime_in, streams, data->k);
+ if (cmd_size == SIZE_MAX)
+ return -EINVAL;
+
+ verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL);
+ if (!verify_mprime_in)
+ return -ENOMEM;
+
+ verify_mprime_in->header.api_version = HDCP_API_VERSION;
+ verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
+ verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS;
+ verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header);
+
+ verify_mprime_in->port.integrated_port_type = data->port_type;
+ verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi;
+ verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN);
+ drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m);
+
+ memcpy(verify_mprime_in->streams, data->streams,
+ array_size(data->k, sizeof(*data->streams)));
+
+ verify_mprime_in->k = cpu_to_be16(data->k);
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size,
+ (u8 *)&verify_mprime_out,
+ sizeof(verify_mprime_out));
+ kfree(verify_mprime_in);
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_REPEATER_AUTH_STREAM_REQ,
+ verify_mprime_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
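A note on the sizing above: this is the one variable-length command in the file. struct_size(verify_mprime_in, streams, data->k) evaluates to sizeof(*verify_mprime_in) + data->k * sizeof(*verify_mprime_in->streams) and saturates to SIZE_MAX if the multiplication or addition would overflow, which is why the single SIZE_MAX check is sufficient to reject an oversized k before the kzalloc().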
+
+static int gsc_hdcp_enable_authentication(struct device *dev,
+ struct hdcp_port_data *data)
+{
+ struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } };
+ struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ enable_auth_in.header.api_version = HDCP_API_VERSION;
+ enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
+ enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
+
+ enable_auth_in.port.integrated_port_type = data->port_type;
+ enable_auth_in.port.physical_port = (u8)data->hdcp_ddi;
+ enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+ enable_auth_in.stream_type = data->streams[0].stream_type;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in,
+ sizeof(enable_auth_in),
+ (u8 *)&enable_auth_out,
+ sizeof(enable_auth_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ WIRED_ENABLE_AUTH, enable_auth_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+gsc_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
+{
+ struct wired_cmd_close_session_in session_close_in = { { 0 } };
+ struct wired_cmd_close_session_out session_close_out = { { 0 } };
+ struct drm_i915_private *i915;
+ ssize_t byte;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ i915 = kdev_to_i915(dev);
+ if (!i915) {
+ dev_err(dev, "DRM not initialized, aborting HDCP.\n");
+ return -ENODEV;
+ }
+
+ session_close_in.header.api_version = HDCP_API_VERSION;
+ session_close_in.header.command_id = WIRED_CLOSE_SESSION;
+ session_close_in.header.status = FW_HDCP_STATUS_SUCCESS;
+ session_close_in.header.buffer_len =
+ WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
+
+ session_close_in.port.integrated_port_type = data->port_type;
+ session_close_in.port.physical_port = (u8)data->hdcp_ddi;
+ session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
+
+ byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in,
+ sizeof(session_close_in),
+ (u8 *)&session_close_out,
+ sizeof(session_close_out));
+ if (byte < 0) {
+ drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) {
+ drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n",
+ session_close_out.header.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct i915_hdcp_ops gsc_hdcp_ops = {
+ .initiate_hdcp2_session = gsc_hdcp_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ gsc_hdcp_verify_receiver_cert_prepare_km,
+ .verify_hprime = gsc_hdcp_verify_hprime,
+ .store_pairing_info = gsc_hdcp_store_pairing_info,
+ .initiate_locality_check = gsc_hdcp_initiate_locality_check,
+ .verify_lprime = gsc_hdcp_verify_lprime,
+ .get_session_key = gsc_hdcp_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ gsc_hdcp_repeater_check_flow_prepare_ack,
+ .verify_mprime = gsc_hdcp_verify_mprime,
+ .enable_hdcp_authentication = gsc_hdcp_enable_authentication,
+ .close_hdcp_session = gsc_hdcp_close_session,
+};
+
+/* This function allocates memory for the command that we will send to the GSC CS */
+static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
+ struct intel_hdcp_gsc_message *hdcp_message)
+{
+ struct intel_gt *gt = i915->media_gt;
+ struct drm_i915_gem_object *obj = NULL;
+ struct i915_vma *vma = NULL;
+ void *cmd;
+ int err;
+
+ /* allocate object of one page for HDCP command memory and store it */
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+
+ if (IS_ERR(obj)) {
+ drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n");
+ return PTR_ERR(obj);
+ }
+
+ cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+ if (IS_ERR(cmd)) {
+ drm_err(&i915->drm, "Failed to map gsc message page!\n");
+ err = PTR_ERR(cmd);
+ goto out_unpin;
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unmap;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_unmap;
+
+ memset(cmd, 0, obj->base.size);
+
+ hdcp_message->hdcp_cmd = cmd;
+ hdcp_message->vma = vma;
+
+ return 0;
+
+out_unmap:
+ i915_gem_object_unpin_map(obj);
+out_unpin:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915)
+{
+ struct intel_hdcp_gsc_message *hdcp_message;
+ int ret;
+
+ hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
+
+ if (!hdcp_message)
+ return -ENOMEM;
+
+ /*
+ * NOTE: No need to take the comp mutex here as it is already
+ * held before this function is called
+ */
+ i915->display.hdcp.hdcp_message = hdcp_message;
+ ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message);
+
+ if (ret)
+ drm_err(&i915->drm, "Could not initialize hdcp_message\n");
+
+ return ret;
+}
+
+static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915)
+{
+ struct intel_hdcp_gsc_message *hdcp_message =
+ i915->display.hdcp.hdcp_message;
+
+ i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP);
+ kfree(hdcp_message);
+}
+
+int intel_hdcp_gsc_init(struct drm_i915_private *i915)
+{
+ struct i915_hdcp_master *data;
+ int ret;
+
+ data = kzalloc(sizeof(struct i915_hdcp_master), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_lock(&i915->display.hdcp.comp_mutex);
+ i915->display.hdcp.master = data;
+ i915->display.hdcp.master->hdcp_dev = i915->drm.dev;
+ i915->display.hdcp.master->ops = &gsc_hdcp_ops;
+ ret = intel_hdcp_gsc_hdcp2_init(i915);
+ mutex_unlock(&i915->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
+{
+ intel_hdcp_gsc_free_message(i915);
+ kfree(i915->display.hdcp.master);
+}
+
+static int intel_gsc_send_sync(struct drm_i915_private *i915,
+ struct intel_gsc_mtl_header *header, u64 addr,
+ size_t msg_out_len)
+{
+ struct intel_gt *gt = i915->media_gt;
+ int ret;
+
+ header->flags = 0;
+ ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr,
+ header->message_size,
+ addr,
+ msg_out_len + sizeof(*header));
+ if (ret) {
+ drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret);
+ return ret;
+ }
+
+ /* Check the validity marker for memory sanity */
+ if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) {
+ drm_err(&i915->drm, "invalid validity marker\n");
+ return -EINVAL;
+ }
+
+ if (header->status != 0) {
+ drm_err(&i915->drm, "header status indicates error %d\n",
+ header->status);
+ return -EINVAL;
+ }
+
+ if (header->flags & GSC_OUTFLAG_MSG_PENDING)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * This function is used for sending requests and also handles receipt of
+ * the reply messages, so no separate message-retrieval function is needed.
+ * We initialize the intel_hdcp_gsc_message structure, then prepend the
+ * GSC CS memory header as stated in the specs, after which the normal
+ * HDCP payload follows.
+ */
+ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
+ size_t msg_in_len, u8 *msg_out,
+ size_t msg_out_len)
+{
+ struct intel_gt *gt = i915->media_gt;
+ struct intel_gsc_mtl_header *header;
+ const size_t max_msg_size = PAGE_SIZE - sizeof(*header);
+ struct intel_hdcp_gsc_message *hdcp_message;
+ u64 addr, host_session_id;
+ u32 reply_size, msg_size;
+ int ret, tries = 0;
+
+ if (!intel_uc_uses_gsc_uc(&gt->uc))
+ return -ENODEV;
+
+ if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
+ return -ENOSPC;
+
+ hdcp_message = i915->display.hdcp.hdcp_message;
+ header = hdcp_message->hdcp_cmd;
+ addr = i915_ggtt_offset(hdcp_message->vma);
+
+ msg_size = msg_in_len + sizeof(*header);
+ memset(header, 0, msg_size);
+ get_random_bytes(&host_session_id, sizeof(u64));
+ intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_HDCP,
+ msg_size, host_session_id);
+ memcpy(hdcp_message->hdcp_cmd + sizeof(*header), msg_in, msg_in_len);
+
+ /*
+ * Keep resending the request while the pending bit is set. There is
+ * no need to add a message handle: we reuse the same address, so the
+ * header location (and the handle it contains) stays the same. We
+ * retry up to 20 times, 50 ms apart.
+ */
+ do {
+ ret = intel_gsc_send_sync(i915, header, addr, msg_out_len);
+
+ /* Only try again if gsc says so */
+ if (ret != -EAGAIN)
+ break;
+
+ msleep(50);
+
+ } while (++tries < 20);
+
+ if (ret)
+ goto err;
+
+ /* we use the same mem for the reply, so header is in the same loc */
+ reply_size = header->message_size - sizeof(*header);
+ if (reply_size > msg_out_len) {
+ drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n",
+ reply_size, (u32)msg_out_len);
+ reply_size = msg_out_len;
+ } else if (reply_size != msg_out_len) {
+ drm_dbg_kms(&i915->drm, "caller unexpected HCDP reply size %u (%d)\n",
+ reply_size, (u32)msg_out_len);
+ }
+
+ memcpy(msg_out, hdcp_message->hdcp_cmd + sizeof(*header), msg_out_len);
+
+err:
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
new file mode 100644
index 000000000000..5cc9fd2e88f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_GSC_H__
+#define __INTEL_HDCP_GSC_H__
+
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+struct intel_hdcp_gsc_message {
+ struct i915_vma *vma;
+ void *hdcp_cmd;
+};
+
+bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915);
+ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
+ size_t msg_in_len, u8 *msg_out,
+ size_t msg_out_len);
+int intel_hdcp_gsc_init(struct drm_i915_private *i915);
+void intel_hdcp_gsc_fini(struct drm_i915_private *i915);
+
+#endif /* __INTEL_HDCP_GSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index c0ce6d3dc505..c7e9e1fbed37 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -238,15 +238,11 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_rmw(dev_priv, VIDEO_DIP_CTL,
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
@@ -314,15 +310,11 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
@@ -396,15 +388,11 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
@@ -472,15 +460,11 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 val, *data = frame;
+ u32 *data = frame;
int i;
- val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe));
-
- val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(type);
-
- intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe),
+ VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
*data++ = intel_de_read(dev_priv,
@@ -1795,7 +1779,7 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
else
max_tmds_clock = 165000;
- vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder);
+ vbt_max_tmds_clock = intel_bios_hdmi_max_tmds_clock(encoder->devdata);
if (vbt_max_tmds_clock)
max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock);
@@ -2152,7 +2136,7 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
* Our YCbCr output is always limited range.
* crtc_state->limited_color_range only applies to RGB,
* and it must never be set for YCbCr or we risk setting
- * some conflicting bits in PIPECONF which will mess up
+ * some conflicting bits in TRANSCONF which will mess up
* the colors on the monitor.
*/
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -2240,6 +2224,25 @@ static bool intel_hdmi_is_cloned(const struct intel_crtc_state *crtc_state)
!is_power_of_2(crtc_state->uapi.encoder_mask);
}
+static bool source_supports_scrambling(struct intel_encoder *encoder)
+{
+ /*
+ * Gen 10+ sources support HDMI 2.0: the max TMDS clock is 594 MHz and
+ * scrambling is supported.
+ * However, some platforms that support HDMI 2.0 carry an HDMI 1.4
+ * retimer chip, and have their max TMDS clock capped by VBT to less
+ * than 340 MHz.
+ *
+ * When an HDMI 2.0 sink is connected to such a platform, this creates
+ * a problem: the platform and the sink both support scrambling, but
+ * the HDMI 1.4 retimer chip doesn't.
+ *
+ * So decide on scrambling based on the max TMDS clock, taking the
+ * restrictions coming from VBT into account.
+ */
+ return intel_hdmi_source_max_tmds_clock(encoder) > 340000;
+}
+
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -2302,7 +2305,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
- if (scdc->scrambling.supported && DISPLAY_VER(dev_priv) >= 10) {
+ if (scdc->scrambling.supported && source_supports_scrambling(encoder)) {
if (scdc->scrambling.low_rates)
pipe_config->hdmi_scrambling = true;
@@ -2852,11 +2855,12 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
enum port port = encoder->port;
u8 ddc_pin;
- ddc_pin = intel_bios_alternate_ddc_pin(encoder);
+ ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata);
if (ddc_pin) {
drm_dbg_kms(&dev_priv->drm,
- "Using DDC pin 0x%x for port %c (VBT)\n",
- ddc_pin, port_name(port));
+ "[ENCODER:%d:%s] Using DDC pin 0x%x (VBT)\n",
+ encoder->base.base.id, encoder->base.name,
+ ddc_pin);
return ddc_pin;
}
@@ -2882,8 +2886,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
drm_dbg_kms(&dev_priv->drm,
- "Using DDC pin 0x%x for port %c (platform default)\n",
- ddc_pin, port_name(port));
+ "[ENCODER:%d:%s] Using DDC pin 0x%x (platform default)\n",
+ encoder->base.base.id, encoder->base.name,
+ ddc_pin);
return ddc_pin;
}
@@ -2904,7 +2909,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->set_infoframes = g4x_set_infoframes;
dig_port->infoframes_enabled = g4x_infoframes_enabled;
} else if (HAS_DDI(dev_priv)) {
- if (intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) {
+ if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) {
dig_port->write_infoframe = lspcon_write_infoframe;
dig_port->read_infoframe = lspcon_read_infoframe;
dig_port->set_infoframes = lspcon_set_infoframes;
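The four *_read_infoframe() conversions above all collapse the same open-coded
read/modify/write sequence into intel_de_rmw(i915, reg, clear, set). A minimal
userspace model of the value transformation involved (a sketch only: the real
helper also performs the MMIO access itself and returns the old register value,
and the constants below are made up rather than the actual VIDEO_DIP_CTL layout):

#include <stdint.h>
#include <stdio.h>

/* intel_de_rmw() semantics, modeled: new = (old & ~clear) | set */
static uint32_t rmw(uint32_t old, uint32_t clear, uint32_t set)
{
	return (old & ~clear) | set;
}

int main(void)
{
	uint32_t old = 0xffff0000;
	uint32_t clear = 0x000f000f;	/* e.g. select mask | DIP data offset */
	uint32_t set = 0x00010000;	/* e.g. g4x_infoframe_index(type) */

	printf("0x%08x -> 0x%08x\n", old, rmw(old, clear, set));
	return 0;
}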
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 907ab7526cb4..b12900446828 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -389,6 +389,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock);
+ /* Skip calling encoder hotplug handlers if the ignore-long-HPD flag is set */
+ if (dev_priv->display.hotplug.ignore_long_hpd) {
+ drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
+ mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ return;
+ }
+
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -940,4 +947,6 @@ void intel_hpd_debugfs_register(struct drm_i915_private *i915)
i915, &i915_hpd_storm_ctl_fops);
debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
i915, &i915_hpd_short_storm_ctl_fops);
+ debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
+ &i915->display.hotplug.ignore_long_hpd);
}
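The new i915_ignore_long_hpd attribute is a plain debugfs bool, so a test
harness can flip it with an ordinary file write. A sketch (the dri/0 minor and
the /sys/kernel/debug mount point are assumptions about the test environment,
not something this patch guarantees):

#include <stdio.h>

int main(void)
{
	/* hypothetical path; adjust for the actual debugfs mount and DRM minor */
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_ignore_long_hpd", "w");

	if (!f)
		return 1;
	/* debugfs bool attributes accept "Y"/"N" or "1"/"0" */
	fputs("1", f);
	return fclose(f) ? 1 : 0;
}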
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 8aaaef4d7856..5863763de530 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -315,7 +315,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
* intel_lpe_audio_notify() - notify lpe audio event
* audio driver and i915
* @dev_priv: the i915 drm device private data
- * @pipe: pipe
+ * @cpu_transcoder: CPU transcoder
* @port: port
* @eld : ELD data
* @ls_clock: Link symbol clock in kHz
@@ -324,7 +324,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
* Notify lpe audio driver of eld change.
*/
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
+ enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output)
{
unsigned long irqflags;
@@ -344,7 +344,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
if (eld != NULL) {
memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
- ppdata->pipe = pipe;
+ ppdata->pipe = cpu_transcoder;
ppdata->ls_clock = ls_clock;
ppdata->dp_output = dp_output;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
index f848c5038714..0beecac267ae 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
@@ -8,15 +8,15 @@
#include <linux/types.h>
-enum pipe;
enum port;
+enum transcoder;
struct drm_i915_private;
int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
+ enum transcoder cpu_transcoder, enum port port,
const void *eld, int ls_clock, bool dp_output);
#endif /* __INTEL_LPE_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 9ff1c0b223ad..bb3b5355a0d9 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -689,7 +689,7 @@ void lspcon_resume(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dev);
enum drm_lspcon_mode expected_mode;
- if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return;
if (!lspcon->active) {
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index a1557d84ce0a..a504b3a7fbd5 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -49,6 +49,7 @@
#include "intel_fdi.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
/* Private structure for the integrated LVDS support */
@@ -84,18 +85,18 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base);
}
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct drm_i915_private *i915,
i915_reg_t lvds_reg, enum pipe *pipe)
{
u32 val;
- val = intel_de_read(dev_priv, lvds_reg);
+ val = intel_de_read(i915, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
- *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
+ if (HAS_PCH_CPT(i915))
+ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
else
- *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
return val & LVDS_PORT_EN;
}
@@ -103,31 +104,30 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain);
+ wakeref = intel_display_power_get_if_enabled(i915, encoder->power_domain);
if (!wakeref)
return false;
- ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
+ ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ intel_display_power_put(i915, encoder->power_domain, wakeref);
return ret;
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
u32 tmp, flags = 0;
- pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS);
tmp = intel_de_read(dev_priv, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
@@ -139,20 +139,20 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_PVSYNC;
- pipe_config->hw.adjusted_mode.flags |= flags;
+ crtc_state->hw.adjusted_mode.flags |= flags;
if (DISPLAY_VER(dev_priv) < 5)
- pipe_config->gmch_pfit.lvds_border_bits =
+ crtc_state->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
if (DISPLAY_VER(dev_priv) < 4) {
tmp = intel_de_read(dev_priv, PFIT_CONTROL);
- pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ crtc_state->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
- pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
@@ -216,41 +216,44 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, PP_CONTROL(0), val);
intel_de_write(dev_priv, PP_ON_DELAYS(0),
- REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+ REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
intel_de_write(dev_priv, PP_OFF_DELAYS(0),
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
intel_de_write(dev_priv, PP_DIVISOR(0),
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
}
static void intel_pre_enable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
u32 temp;
- if (HAS_PCH_SPLIT(dev_priv)) {
- assert_fdi_rx_pll_disabled(dev_priv, pipe);
- assert_shared_dpll_disabled(dev_priv,
- pipe_config->shared_dpll);
+ if (HAS_PCH_SPLIT(i915)) {
+ assert_fdi_rx_pll_disabled(i915, pipe);
+ assert_shared_dpll_disabled(i915, crtc_state->shared_dpll);
} else {
- assert_pll_disabled(dev_priv, pipe);
+ assert_pll_disabled(i915, pipe);
}
- intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps);
+ intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
temp = lvds_encoder->init_lvds_val;
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(i915)) {
temp &= ~LVDS_PIPE_SEL_MASK_CPT;
temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
@@ -260,7 +263,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= pipe_config->gmch_pfit.lvds_border_bits;
+ temp |= crtc_state->gmch_pfit.lvds_border_bits;
/*
* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -283,14 +286,14 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
/*
* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
- * only controlled through the PIPECONF reg.
+ * only controlled through the TRANSCONF reg.
*/
- if (DISPLAY_VER(dev_priv) == 4) {
+ if (DISPLAY_VER(i915) == 4) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
*/
- if (pipe_config->dither && pipe_config->pipe_bpp == 18)
+ if (crtc_state->dither && crtc_state->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -301,7 +304,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- intel_de_write(dev_priv, lvds_encoder->reg, temp);
+ intel_de_write(i915, lvds_encoder->reg, temp);
}
/*
@@ -309,25 +312,22 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
*/
static void intel_enable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_write(dev_priv, lvds_encoder->reg,
- intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN);
+ intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN);
- intel_de_write(dev_priv, PP_CONTROL(0),
- intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON);
+ intel_de_rmw(dev_priv, PP_CONTROL(0), 0, PANEL_POWER_ON);
intel_de_posting_read(dev_priv, lvds_encoder->reg);
if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
drm_err(&dev_priv->drm,
"timed out waiting for panel to power on\n");
- intel_backlight_enable(pipe_config, conn_state);
+ intel_backlight_enable(crtc_state, conn_state);
}
static void intel_disable_lvds(struct intel_atomic_state *state,
@@ -338,14 +338,12 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_write(dev_priv, PP_CONTROL(0),
- intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
+ intel_de_rmw(dev_priv, PP_CONTROL(0), PANEL_POWER_ON, 0);
if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
drm_err(&dev_priv->drm,
"timed out waiting for panel to power off\n");
- intel_de_write(dev_priv, lvds_encoder->reg,
- intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
+ intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0);
intel_de_posting_read(dev_priv, lvds_encoder->reg);
}
@@ -386,19 +384,19 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder)
}
static enum drm_mode_status
-intel_lvds_mode_valid(struct drm_connector *connector,
+intel_lvds_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
const struct drm_display_mode *fixed_mode =
- intel_panel_fixed_mode(intel_connector, mode);
- int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ intel_panel_fixed_mode(connector, mode);
+ int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
enum drm_mode_status status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- status = intel_panel_mode_valid(intel_connector, mode);
+ status = intel_panel_mode_valid(connector, mode);
if (status != MODE_OK)
return status;
@@ -408,23 +406,21 @@ intel_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config,
+static int intel_lvds_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_lvds_encoder *lvds_encoder =
- to_lvds_encoder(intel_encoder);
- struct intel_connector *intel_connector =
- lvds_encoder->attached_connector;
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+ struct intel_connector *connector = lvds_encoder->attached_connector;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
unsigned int lvds_bpp;
int ret;
/* Should never happen!! */
- if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) {
- drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
+ if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) {
+ drm_err(&i915->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -433,14 +429,14 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
else
lvds_bpp = 6*3;
- if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
- drm_dbg_kms(&dev_priv->drm,
+ if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) {
+ drm_dbg_kms(&i915->drm,
"forcing display bpp (was %d) to LVDS (%d)\n",
- pipe_config->pipe_bpp, lvds_bpp);
- pipe_config->pipe_bpp = lvds_bpp;
+ crtc_state->pipe_bpp, lvds_bpp);
+ crtc_state->pipe_bpp = lvds_bpp;
}
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/*
* We have timings from the BIOS for the panel, put them in
@@ -448,17 +444,17 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ ret = intel_panel_compute_config(connector, adjusted_mode);
if (ret)
return ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (HAS_PCH_SPLIT(dev_priv))
- pipe_config->has_pch_encoder = true;
+ if (HAS_PCH_SPLIT(i915))
+ crtc_state->has_pch_encoder = true;
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(crtc_state, conn_state);
if (ret)
return ret;
@@ -474,19 +470,19 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
-static int intel_lvds_get_modes(struct drm_connector *connector)
+static int intel_lvds_get_modes(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_edid *fixed_edid = intel_connector->panel.fixed_edid;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ const struct drm_edid *fixed_edid = connector->panel.fixed_edid;
/* Use panel fixed edid if we have one */
if (!IS_ERR_OR_NULL(fixed_edid)) {
- drm_edid_connector_update(connector, fixed_edid);
+ drm_edid_connector_update(&connector->base, fixed_edid);
- return drm_edid_connector_add_modes(connector);
+ return drm_edid_connector_add_modes(&connector->base);
}
- return intel_panel_get_modes(intel_connector);
+ return intel_panel_get_modes(connector);
}
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -585,12 +581,12 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "AOpen i45GMx-I",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
- DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
- },
- },
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
@@ -607,14 +603,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
},
{
- .callback = intel_no_lvds_dmi_callback,
- .ident = "Clientron E830",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
- DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
- },
- },
- {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron E830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+ },
+ },
+ {
.callback = intel_no_lvds_dmi_callback,
.ident = "Asus EeeBox PC EB1007",
.matches = {
@@ -764,11 +760,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(&i915->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS)
return encoder;
}
@@ -776,24 +772,24 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+bool intel_is_dual_link_lvds(struct drm_i915_private *i915)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(i915);
return encoder && to_lvds_encoder(encoder)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
- struct drm_i915_private *dev_priv = to_i915(lvds_encoder->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev);
struct intel_connector *connector = lvds_encoder->attached_connector;
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
unsigned int val;
/* use the module option value if specified */
- if (dev_priv->params.lvds_channel_mode > 0)
- return dev_priv->params.lvds_channel_mode == 2;
+ if (i915->params.lvds_channel_mode > 0)
+ return i915->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
@@ -808,8 +804,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = intel_de_read(dev_priv, lvds_encoder->reg);
- if (HAS_PCH_CPT(dev_priv))
+ val = intel_de_read(i915, lvds_encoder->reg);
+ if (HAS_PCH_CPT(i915))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
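For reference, the decision order in compute_is_dual_link_lvds() modeled in
isolation (a sketch: the LVDS register/VBT fallback for panels at or below the
single-channel limit is reduced to a boolean parameter here):

#include <stdbool.h>
#include <stdio.h>

static bool is_dual_link(int lvds_channel_mode, int clock_khz, bool reg_says_dual)
{
	/* explicit module parameter wins */
	if (lvds_channel_mode > 0)
		return lvds_channel_mode == 2;

	/* single channel LVDS is limited to 112 MHz */
	if (clock_khz > 112999)
		return true;

	/* otherwise defer to what the LVDS register (or VBT default) says */
	return reg_says_dual;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_dual_link(2, 80000, false),	/* forced dual -> 1 */
	       is_dual_link(0, 140000, false),	/* over 112 MHz -> 1 */
	       is_dual_link(0, 80000, false));	/* fallback -> 0 */
	return 0;
}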
@@ -826,56 +822,54 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @dev_priv: i915 device
+ * @i915: i915 device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_i915_private *dev_priv)
+void intel_lvds_init(struct drm_i915_private *i915)
{
struct intel_lvds_encoder *lvds_encoder;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
+ struct intel_connector *connector;
const struct drm_edid *drm_edid;
+ struct intel_encoder *encoder;
i915_reg_t lvds_reg;
u32 lvds;
u8 pin;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(&dev_priv->drm, !dev_priv->display.vbt.int_lvds_support,
+ drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!dev_priv->display.vbt.int_lvds_support) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!i915->display.vbt.int_lvds_support) {
+ drm_dbg_kms(&i915->drm,
"Internal LVDS support disabled by VBT\n");
return;
}
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(i915))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- lvds = intel_de_read(dev_priv, lvds_reg);
+ lvds = intel_de_read(i915, lvds_reg);
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(i915)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
}
pin = GMBUS_PIN_PANEL;
- if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
+ if (!intel_bios_is_lvds_present(i915, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"LVDS is not present in VBT\n");
return;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"LVDS is not present in VBT, but enabled anyway\n");
}
@@ -883,57 +877,55 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (!lvds_encoder)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(lvds_encoder);
return;
}
- lvds_encoder->attached_connector = intel_connector;
+ lvds_encoder->attached_connector = connector;
+ encoder = &lvds_encoder->base;
- intel_encoder = &lvds_encoder->base;
- encoder = &intel_encoder->base;
- connector = &intel_connector->base;
- drm_connector_init(&dev_priv->drm, &intel_connector->base, &intel_lvds_connector_funcs,
+ drm_connector_init(&i915->drm, &connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_lvds_enc_funcs,
+ drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
- intel_encoder->enable = intel_enable_lvds;
- intel_encoder->pre_enable = intel_pre_enable_lvds;
- intel_encoder->compute_config = intel_lvds_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
- intel_encoder->disable = pch_disable_lvds;
- intel_encoder->post_disable = pch_post_disable_lvds;
+ encoder->enable = intel_enable_lvds;
+ encoder->pre_enable = intel_pre_enable_lvds;
+ encoder->compute_config = intel_lvds_compute_config;
+ if (HAS_PCH_SPLIT(i915)) {
+ encoder->disable = pch_disable_lvds;
+ encoder->post_disable = pch_post_disable_lvds;
} else {
- intel_encoder->disable = gmch_disable_lvds;
+ encoder->disable = gmch_disable_lvds;
}
- intel_encoder->get_hw_state = intel_lvds_get_hw_state;
- intel_encoder->get_config = intel_lvds_get_config;
- intel_encoder->update_pipe = intel_backlight_update;
- intel_encoder->shutdown = intel_lvds_shutdown;
- intel_connector->get_hw_state = intel_connector_get_hw_state;
-
- intel_connector_attach_encoder(intel_connector, intel_encoder);
-
- intel_encoder->type = INTEL_OUTPUT_LVDS;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
- intel_encoder->port = PORT_NONE;
- intel_encoder->cloneable = 0;
- if (DISPLAY_VER(dev_priv) < 4)
- intel_encoder->pipe_mask = BIT(PIPE_B);
+ encoder->get_hw_state = intel_lvds_get_hw_state;
+ encoder->get_config = intel_lvds_get_config;
+ encoder->update_pipe = intel_backlight_update;
+ encoder->shutdown = intel_lvds_shutdown;
+ connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_connector_attach_encoder(connector, encoder);
+
+ encoder->type = INTEL_OUTPUT_LVDS;
+ encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
+ encoder->port = PORT_NONE;
+ encoder->cloneable = 0;
+ if (DISPLAY_VER(i915) < 4)
+ encoder->pipe_mask = BIT(PIPE_B);
else
- intel_encoder->pipe_mask = ~0;
+ encoder->pipe_mask = ~0;
- drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ drm_connector_helper_add(&connector->base, &intel_lvds_connector_helper_funcs);
+ connector->base.display_info.subpixel_order = SubPixelHorizontalRGB;
lvds_encoder->reg = lvds_reg;
- intel_lvds_add_properties(connector);
+ intel_lvds_add_properties(&connector->base);
- intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
+ intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;
/*
@@ -948,13 +940,13 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&i915->drm.mode_config.mutex);
if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) {
const struct edid *edid;
/* FIXME: Make drm_get_edid_switcheroo() return drm_edid */
- edid = drm_get_edid_switcheroo(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
+ edid = drm_get_edid_switcheroo(&connector->base,
+ intel_gmbus_get_adapter(i915, pin));
if (edid) {
drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH);
kfree(edid);
@@ -962,49 +954,49 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
drm_edid = NULL;
}
} else {
- drm_edid = drm_edid_read_ddc(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
+ drm_edid = drm_edid_read_ddc(&connector->base,
+ intel_gmbus_get_adapter(i915, pin));
}
if (drm_edid) {
- if (drm_edid_connector_update(connector, drm_edid) ||
- !drm_edid_connector_add_modes(connector)) {
- drm_edid_connector_update(connector, NULL);
+ if (drm_edid_connector_update(&connector->base, drm_edid) ||
+ !drm_edid_connector_add_modes(&connector->base)) {
+ drm_edid_connector_update(&connector->base, NULL);
drm_edid_free(drm_edid);
drm_edid = ERR_PTR(-EINVAL);
}
} else {
drm_edid = ERR_PTR(-ENOENT);
}
- intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL,
+ intel_bios_init_panel_late(i915, &connector->panel, NULL,
IS_ERR(drm_edid) ? NULL : drm_edid);
/* Try EDID first */
- intel_panel_add_edid_fixed_modes(intel_connector, true);
+ intel_panel_add_edid_fixed_modes(connector, true);
/* Failed to get EDID, what about VBT? */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_vbt_lfp_fixed_mode(connector);
/*
* If we didn't get a fixed mode from EDID or VBT, try checking
* if the panel is already turned on. If so, assume that
* whatever is currently programmed is the correct mode.
*/
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_encoder_fixed_mode(intel_connector, intel_encoder);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_encoder_fixed_mode(connector, encoder);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&i915->drm.mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
+ if (!intel_panel_preferred_fixed_mode(connector))
goto failed;
- intel_panel_init(intel_connector, drm_edid);
+ intel_panel_init(connector, drm_edid);
- intel_backlight_setup(intel_connector, INVALID_PIPE);
+ intel_backlight_setup(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+ drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -1012,10 +1004,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
return;
failed:
- drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
- drm_connector_cleanup(connector);
- drm_encoder_cleanup(encoder);
+ drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n");
+ drm_connector_cleanup(&connector->base);
+ drm_encoder_cleanup(&encoder->base);
kfree(lvds_encoder);
- intel_connector_free(intel_connector);
+ intel_connector_free(connector);
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_lvds_regs.h b/drivers/gpu/drm/i915/display/intel_lvds_regs.h
new file mode 100644
index 000000000000..47c1832819ee
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lvds_regs.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_LVDS_REGS_H__
+#define __INTEL_LVDS_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* LVDS port control */
+#define LVDS _MMIO(0x61180)
+/*
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN REG_BIT(31)
+/* Selects pipe B for LVDS data. Must be set on pre-965. */
+#define LVDS_PIPE_SEL_MASK REG_BIT(30)
+#define LVDS_PIPE_SEL(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK, (pipe))
+#define LVDS_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29)
+#define LVDS_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, (pipe))
+/* LVDS dithering flag on 965/g4x platform */
+#define LVDS_ENABLE_DITHER REG_BIT(25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define LVDS_VSYNC_POLARITY REG_BIT(21)
+#define LVDS_HSYNC_POLARITY REG_BIT(20)
+
+/* Enable border for unscaled (or aspect-scaled) display */
+#define LVDS_BORDER_ENABLE REG_BIT(15)
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK REG_GENMASK(9, 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 0)
+#define LVDS_A0A2_CLKA_POWER_UP REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 3)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK REG_GENMASK(7, 6)
+#define LVDS_A3_POWER_DOWN REG_FIELD_PREP(LVDS_A3_POWER_MASK, 0)
+#define LVDS_A3_POWER_UP REG_FIELD_PREP(LVDS_A3_POWER_MASK, 3)
+/*
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK REG_GENMASK(5, 4)
+#define LVDS_CLKB_POWER_DOWN REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 0)
+#define LVDS_CLKB_POWER_UP REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 3)
+/*
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK REG_GENMASK(3, 2)
+#define LVDS_B0B3_POWER_DOWN REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 0)
+#define LVDS_B0B3_POWER_UP REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 3)
+
+#define PCH_LVDS _MMIO(0xe1180)
+#define LVDS_DETECTED REG_BIT(1)
+
+#endif /* __INTEL_LVDS_REGS_H__ */
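The REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers used throughout this new
header come from the intel_display_reg_defs.h/i915_reg_defs.h machinery; below
is a userspace approximation of their arithmetic (minus the kernel versions'
compile-time checking), exercised on the CPT pipe-select field defined above:

#include <stdint.h>
#include <stdio.h>

#define BIT32(n)	(UINT32_C(1) << (n))
#define GENMASK32(h, l)	((~UINT32_C(0) << (l)) & (~UINT32_C(0) >> (31 - (h))))
#define PREP32(m, v)	(((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define GET32(m, r)	(((r) & (m)) >> __builtin_ctz(m))

int main(void)
{
	uint32_t port_en = BIT32(31);			/* LVDS_PORT_EN */
	uint32_t pipe_sel_cpt = GENMASK32(30, 29);	/* LVDS_PIPE_SEL_MASK_CPT */
	uint32_t val = PREP32(pipe_sel_cpt, 1);		/* LVDS_PIPE_SEL_CPT(PIPE_B) */

	/* prints port_en=0x80000000 val=0x20000000 pipe=1 */
	printf("port_en=0x%08x val=0x%08x pipe=%u\n",
	       port_en, val, GET32(pipe_sel_cpt, val));
	return 0;
}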
diff --git a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
index 0e8248bce52d..0306ade2bc30 100644
--- a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
@@ -142,7 +142,9 @@
#define FIA1_BASE 0x163000
#define FIA2_BASE 0x16E000
#define FIA3_BASE 0x16F000
-#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE)
+#define _FIA(fia) _PICK_EVEN_2RANGES((fia), 1, \
+ FIA1_BASE, FIA1_BASE,\
+ FIA2_BASE, FIA3_BASE)
#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
/* ICL PHY DFLEX registers */
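The new _FIA() definition relies on _PICK_EVEN_2RANGES(); here is a standalone
model of how the two ranges resolve for the three FIA instances (a sketch of
the macro's arithmetic as I read it, not the i915_reg_defs.h implementation
itself):

#include <stdint.h>
#include <stdio.h>

/* _PICK_EVEN(i, a, b) linearly extrapolates from two bases */
static uint32_t pick_even(int i, uint32_t a, uint32_t b)
{
	return a + i * (b - a);
}

/* Two back-to-back even ranges: indices below `split` use the first
 * pair of bases, the rest use the second (re-indexed from zero). */
static uint32_t pick_even_2ranges(int i, int split,
				  uint32_t a, uint32_t b,
				  uint32_t c, uint32_t d)
{
	return i < split ? pick_even(i, a, b) : pick_even(i - split, c, d);
}

int main(void)
{
	const uint32_t fia1 = 0x163000, fia2 = 0x16e000, fia3 = 0x16f000;
	int fia;

	for (fia = 0; fia < 3; fia++)	/* -> 0x163000, 0x16e000, 0x16f000 */
		printf("FIA%d base 0x%x\n", fia + 1,
		       pick_even_2ranges(fia, 1, fia1, fia1, fia2, fia3));
	return 0;
}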
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 52cdbd4fc2fa..4558d02641fe 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_color.h"
@@ -21,9 +22,12 @@
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_fifo_underrun.h"
#include "intel_modeset_setup.h"
#include "intel_pch_display.h"
-#include "intel_pm.h"
+#include "intel_vblank.h"
+#include "intel_wm.h"
#include "skl_watermark.h"
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
@@ -234,12 +238,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!crtc_state->hw.active && !HAS_GMCH(i915))
- return;
-
/*
- * We start out with underrun reporting disabled to avoid races.
- * For correct bookkeeping mark this on active crtcs.
+ * We start out with underrun reporting disabled on active
+ * pipes to avoid races.
*
* Also on gmch platforms we don't have any hardware bits to
* disable the underrun reporting. Which means we need to start
@@ -250,19 +251,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
* No protection against concurrent access is required - at
* worst a fifo underrun happens which also sets this to false.
*/
- crtc->cpu_fifo_underrun_disabled = true;
-
- /*
- * We track the PCH trancoder underrun reporting state
- * within the crtc. With crtc for pipe A housing the underrun
- * reporting state for PCH transcoder A, crtc for pipe B housing
- * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
- * and marking underrun reporting as disabled for the non-existing
- * PCH transcoders B and C would prevent enabling the south
- * error interrupt (see cpt_can_enable_serr_int()).
- */
- if (intel_has_pch_trancoder(i915, crtc->pipe))
- crtc->pch_fifo_underrun_disabled = true;
+ intel_init_fifo_underrun_reporting(i915, crtc,
+ !crtc_state->hw.active &&
+ !HAS_GMCH(i915));
}
static void intel_sanitize_crtc(struct intel_crtc *crtc,
@@ -647,17 +638,14 @@ static void intel_early_display_was(struct drm_i915_private *i915)
* Also known as Wa_14010480278.
*/
if (IS_DISPLAY_VER(i915, 10, 12))
- intel_de_write(i915, GEN9_CLKGATE_DIS_0,
- intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
+ intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
- if (IS_HASWELL(i915)) {
- /*
- * WaRsPkgCStateDisplayPMReq:hsw
- * System hang if this isn't done before disabling all planes!
- */
- intel_de_write(i915, CHICKEN_PAR1_1,
- intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
- }
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ if (IS_HASWELL(i915))
+ intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
/* Display WA #1142:kbl,cfl,cml */
@@ -723,18 +711,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_dpll_sanitize_state(i915);
- if (IS_G4X(i915)) {
- g4x_wm_get_hw_state(i915);
- g4x_wm_sanitize(i915);
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- vlv_wm_get_hw_state(i915);
- vlv_wm_sanitize(i915);
- } else if (DISPLAY_VER(i915) >= 9) {
- skl_wm_get_hw_state(i915);
- skl_wm_sanitize(i915);
- } else if (HAS_PCH_SPLIT(i915)) {
- ilk_wm_get_hw_state(i915);
- }
+ intel_wm_get_hw_state(i915);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index b8dce0576512..b7973a05d022 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -1159,13 +1159,10 @@ void intel_opregion_register(struct drm_i915_private *i915)
intel_opregion_resume(i915);
}
-void intel_opregion_resume(struct drm_i915_private *i915)
+static void intel_opregion_resume_display(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
- if (!opregion->header)
- return;
-
if (opregion->acpi) {
intel_didl_outputs(i915);
intel_setup_cadls(i915);
@@ -1186,18 +1183,24 @@ void intel_opregion_resume(struct drm_i915_private *i915)
/* Some platforms abuse the _DSM to enable MUX */
intel_dsm_get_bios_data_funcs_supported(i915);
-
- intel_opregion_notify_adapter(i915, PCI_D0);
}
-void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+void intel_opregion_resume(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
- intel_opregion_notify_adapter(i915, state);
+ if (HAS_DISPLAY(i915))
+ intel_opregion_resume_display(i915);
+
+ intel_opregion_notify_adapter(i915, PCI_D0);
+}
+
+static void intel_opregion_suspend_display(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
@@ -1208,6 +1211,19 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
opregion->acpi->drdy = 0;
}
+void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (!opregion->header)
+ return;
+
+ intel_opregion_notify_adapter(i915, state);
+
+ if (HAS_DISPLAY(i915))
+ intel_opregion_suspend_display(i915);
+}
+
void intel_opregion_unregister(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
@@ -1221,6 +1237,14 @@ void intel_opregion_unregister(struct drm_i915_private *i915)
unregister_acpi_notifier(&opregion->acpi_notifier);
opregion->acpi_notifier.notifier_call = NULL;
}
+}
+
+void intel_opregion_cleanup(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (!opregion->header)
+ return;
/* just clear all opregion memory pointers now */
memunmap(opregion->header);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index d02e6696a050..fd2ea8ef0fa2 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -60,6 +60,7 @@ struct intel_opregion {
#ifdef CONFIG_ACPI
int intel_opregion_setup(struct drm_i915_private *dev_priv);
+void intel_opregion_cleanup(struct drm_i915_private *i915);
void intel_opregion_register(struct drm_i915_private *dev_priv);
void intel_opregion_unregister(struct drm_i915_private *dev_priv);
@@ -85,6 +86,10 @@ static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
return 0;
}
+static inline void intel_opregion_cleanup(struct drm_i915_private *i915)
+{
+}
+
static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
{
}
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 42aa04bac261..ce2a34a25211 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -39,6 +39,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_quirks.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index cecc0d007cf3..22507da0b5f0 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -10,6 +10,7 @@
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pps.h"
@@ -219,20 +220,20 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
+ intel_de_read(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder)));
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
@@ -266,7 +267,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
reg = PCH_TRANSCONF(pipe);
val = intel_de_read(dev_priv, reg);
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
+ pipeconf_val = intel_de_read(dev_priv, TRANSCONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
@@ -278,15 +279,15 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
* that in pipeconf reg. For HDMI we must use 8bpc
* here for both 8bpc and 12bpc.
*/
- val &= ~PIPECONF_BPC_MASK;
+ val &= ~TRANSCONF_BPC_MASK;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- val |= PIPECONF_BPC_8;
+ val |= TRANSCONF_BPC_8;
else
- val |= pipeconf_val & PIPECONF_BPC_MASK;
+ val |= pipeconf_val & TRANSCONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) {
+ if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) {
if (HAS_PCH_IBX(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX;
@@ -307,7 +308,6 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
- u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -317,21 +317,16 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
assert_pch_ports_disabled(dev_priv, pipe);
reg = PCH_TRANSCONF(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, reg, val);
+ intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(dev_priv))
/* Workaround: Clear the timing override chicken bit again. */
- reg = TRANS_CHICKEN2(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, reg, val);
- }
+ intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
+ TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void ilk_pch_pre_enable(struct intel_atomic_state *state,
@@ -414,7 +409,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ u32 bpc = (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
@@ -456,21 +451,14 @@ void ilk_pch_post_disable(struct intel_atomic_state *state,
ilk_disable_pch_transcoder(crtc);
if (HAS_PCH_CPT(dev_priv)) {
- i915_reg_t reg;
- u32 temp;
-
/* disable TRANS_DP_CTL */
- reg = TRANS_DP_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_PORT_SEL_MASK);
- temp |= TRANS_DP_PORT_SEL_NONE;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe),
+ TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK,
+ TRANS_DP_PORT_SEL_NONE);
/* disable DPLL_SEL */
- temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
- temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
- intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
+ intel_de_rmw(dev_priv, PCH_DPLL_SEL,
+ TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0);
}
ilk_fdi_pll_disable(crtc);
@@ -565,9 +553,9 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
+ pipeconf_val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK)
+ if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK)
val |= TRANS_INTERLACE_INTERLACED;
else
val |= TRANS_INTERLACE_PROGRESSIVE;
@@ -580,20 +568,14 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
- u32 val;
-
- val = intel_de_read(dev_priv, LPT_TRANSCONF);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
+ intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void lpt_pch_enable(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 3657b2940702..f4c09cc37a5e 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -12,19 +12,13 @@
static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+ intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+ intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 7b21438edd9b..24b5b12f7732 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -13,6 +13,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_pps.h"
#include "intel_quirks.h"
@@ -1534,17 +1535,13 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
- if (i915_mmio_reg_valid(regs.pp_div)) {
+ if (i915_mmio_reg_valid(regs.pp_div))
intel_de_write(dev_priv, regs.pp_div,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
- } else {
- u32 pp_ctl;
-
- pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
- pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- }
+ else
+ intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
+ REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(seq->t11_t12, 1000)));
drm_dbg_kms(&dev_priv->drm,
"panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 7a72e15e6836..31084d95711d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -152,7 +152,7 @@ static void psr_irq_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t imr_reg;
- u32 mask, val;
+ u32 mask;
if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
@@ -164,10 +164,7 @@ static void psr_irq_control(struct intel_dp *intel_dp)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
psr_irq_pre_entry_bit_get(intel_dp);
- val = intel_de_read(dev_priv, imr_reg);
- val &= ~psr_irq_mask_get(intel_dp);
- val |= ~mask;
- intel_de_write(dev_priv, imr_reg, val);
+ intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
}
static void psr_event_print(struct drm_i915_private *i915,
@@ -245,8 +242,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
}
if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
- u32 val;
-
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
@@ -260,9 +255,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* again so we don't care about unmasking the interrupt
* or unsetting irq_aux_error.
*/
- val = intel_de_read(dev_priv, imr_reg);
- val |= psr_irq_psr_error_bit_get(intel_dp);
- intel_de_write(dev_priv, imr_reg, val);
+ intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
schedule_work(&intel_dp->psr.work);
}
@@ -542,6 +535,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
val |= intel_psr2_get_tp_time(intel_dp);
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_dp->psr.io_wake_lines < 9 &&
+ intel_dp->psr.fast_wake_lines < 9)
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ else
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
+ }
+
/* Wa_22012278275:adl-p */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
static const u8 map[] = {
@@ -558,31 +559,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
* comments below for more information
*/
- u32 tmp, lines = 7;
-
- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ u32 tmp;
- tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
val |= tmp;
- tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
val |= tmp;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- /*
- * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
- * values from BSpec. In order to setting an optimal power
- * consumption, lower than 4k resolution mode needs to decrease
- * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
- * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
- */
- val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
- val |= TGL_EDP_PSR2_FAST_WAKE(7);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
} else if (DISPLAY_VER(dev_priv) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(7);
- val |= EDP_PSR2_FAST_WAKE(7);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -591,12 +582,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
- /* Wa_1408330847 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK);
-
tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
@@ -637,13 +622,10 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 val;
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
- val &= ~EDP_PSR2_IDLE_FRAME_MASK;
- val |= idle_frames;
- intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
+ intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder),
+ EDP_PSR2_IDLE_FRAME_MASK, idle_frames);
}
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
@@ -708,6 +690,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
{
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u32 exit_scanlines;
/*
@@ -724,7 +707,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
if (crtc_state->enable_psr2_sel_fetch)
return;
- if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
@@ -765,13 +748,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
- /* Wa_14010254185 Wa_14010103792 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
- return false;
- }
-
return crtc_state->enable_psr2_sel_fetch = true;
}
@@ -842,6 +818,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
+static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+ u8 max_wake_lines;
+
+ if (DISPLAY_VER(i915) >= 12) {
+ io_wake_time = 42;
+ /*
+ * According to Bspec it's 42us, but based on testing
+	 * that is not enough, so use 45 us.
+ */
+ fast_wake_time = 45;
+ max_wake_lines = 12;
+ } else {
+ io_wake_time = 50;
+ fast_wake_time = 32;
+ max_wake_lines = 8;
+ }
+
+ io_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->uapi.adjusted_mode, io_wake_time);
+ fast_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->uapi.adjusted_mode, fast_wake_time);
+
+ if (io_wake_lines > max_wake_lines ||
+ fast_wake_lines > max_wake_lines)
+ return false;
+
+ if (i915->params.psr_safest_params)
+ io_wake_lines = fast_wake_lines = max_wake_lines;
+
+	/* According to Bspec the lower limit should be 7 lines. */
+ intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
+ intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+
+ return true;
+}
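+
The function above converts the Bspec wake times from microseconds into scanlines before range-checking them. intel_usecs_to_scanlines() is assumed to do the usual htotal/dotclock arithmetic; a self-contained sketch:

/*
 * One scanline lasts crtc_htotal pixels at crtc_clock kHz, so
 * lines = usecs * clock_kHz / (1000 * htotal). Round up so the
 * programmed wake time is never shorter than requested.
 */
static int usecs_to_scanlines_sketch(const struct drm_display_mode *mode,
				     int usecs)
{
	if (!mode->crtc_htotal)
		return 1;

	return DIV_ROUND_UP(usecs * mode->crtc_clock,
			    1000 * mode->crtc_htotal);
}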
+
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -936,6 +952,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, Unable to use long enough wake times\n");
+ return false;
+ }
+
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -945,13 +967,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
}
- /* Wa_2209313811 */
- if (!crtc_state->enable_psr2_sel_fetch &&
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
- goto unsupported;
- }
-
if (!psr2_granularity_check(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
goto unsupported;
@@ -1071,7 +1086,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
}
if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
+ val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder));
val &= EXITLINE_MASK;
pipe_config->dc3co_exitline = val;
}
@@ -1145,19 +1160,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
psr_irq_control(intel_dp);
- if (intel_dp->psr.dc3co_exitline) {
- u32 val;
-
- /*
- * TODO: if future platforms supports DC3CO in more than one
- * transcoder, EXITLINE will need to be unset when disabling PSR
- */
- val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
- val &= ~EXITLINE_MASK;
- val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
- val |= EXITLINE_ENABLE;
- intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
- }
+ /*
+	 * TODO: if future platforms support DC3CO in more than one
+ * transcoder, EXITLINE will need to be unset when disabling PSR
+ */
+ if (intel_dp->psr.dc3co_exitline)
+ intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
+ intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
@@ -1170,13 +1179,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
*/
if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
IS_DISPLAY_VER(dev_priv, 12, 13)) {
- u16 vtotal, vblank;
-
- vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
- crtc_state->uapi.adjusted_mode.crtc_vdisplay;
- vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
- crtc_state->uapi.adjusted_mode.crtc_vblank_start;
- if (vblank > vtotal)
+ if (crtc_state->hw.adjusted_mode.crtc_vblank_start !=
+ crtc_state->hw.adjusted_mode.crtc_vdisplay)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
wa_16013835468_bit_get(intel_dp));
}
@@ -1199,13 +1203,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
ADLP_1_BASED_X_GRANULARITY);
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK,
- TRANS_SET_CONTEXT_LATENCY_VALUE(1));
-
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
@@ -1360,12 +1357,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
- /* Wa_1408330847 */
- if (intel_dp->psr.psr2_sel_fetch_enabled &&
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
- DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
-
/*
* Wa_16013835468
* Wa_14015648006
@@ -1376,12 +1367,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
wa_16013835468_bit_get(intel_dp), 0);
if (intel_dp->psr.psr2_enabled) {
- /* Wa_16011168373:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
- TRANS_SET_CONTEXT_LATENCY_MASK, 0);
-
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
@@ -1547,8 +1532,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
}
-void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1559,10 +1544,28 @@ void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}
-void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
+void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (plane->id == PLANE_CURSOR)
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ plane_state->ctl);
+ else
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ PLANE_SEL_FETCH_CTL_ENABLE);
+}
+
+void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1573,11 +1576,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
if (!crtc_state->enable_psr2_sel_fetch)
return;
- if (plane->id == PLANE_CURSOR) {
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- plane_state->ctl);
+ if (plane->id == PLANE_CURSOR)
return;
- }
clip = &plane_state->psr2_sel_fetch_area;
@@ -1605,9 +1605,6 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
-
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- PLANE_SEL_FETCH_CTL_ENABLE);
}
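
The selective-fetch plane hooks are now split into a *_noarm phase (offset, position and size payload) and a *_arm phase (the PLANE_SEL_FETCH_CTL write). A hypothetical caller, only to show the ordering the split implies:

static void plane_sel_fetch_update_sketch(struct intel_plane *plane,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state,
					  int color_plane)
{
	/* payload registers first, while nothing is armed yet */
	intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state,
						 plane_state, color_plane);

	/* then the arming CTL write that latches the whole set */
	intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state);
}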
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
@@ -2647,3 +2644,302 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
break;
}
}
+
+static void
+psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ const char *status = "unknown";
+ u32 val, status_val;
+
+ if (intel_dp->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_STATUS(intel_dp->psr.transcoder));
+ status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR_STATUS(intel_dp->psr.transcoder));
+ status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ }
+
+ seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+}
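+
psr_source_status() decodes the hardware state field with REG_FIELD_GET() plus a string table. REG_FIELD_GET() behaves like the generic FIELD_GET(); an equivalent one-liner, assuming a contiguous mask:

/* mask the field out, then shift it down to bit 0 */
static u32 field_get_sketch(u32 mask, u32 val)
{
	return (val & mask) >> (ffs(mask) - 1);
}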
+
+static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
+ intel_wakeref_t wakeref;
+ const char *status;
+ bool enabled;
+ u32 val;
+
+ seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
+ if (psr->sink_support)
+ seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
+ seq_puts(m, "\n");
+
+ if (!psr->sink_support)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled)
+ status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
+ else
+ status = "disabled";
+ seq_printf(m, "PSR mode: %s\n", status);
+
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ str_yes_no(psr->sink_not_reliable));
+
+ goto unlock;
+ }
+
+ if (psr->psr2_enabled) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
+ enabled = val & EDP_PSR2_ENABLE;
+ } else {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
+ enabled = val & EDP_PSR_ENABLE;
+ }
+ seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ str_enabled_disabled(enabled), val);
+ psr_source_status(intel_dp, m);
+ seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+ psr->busy_frontbuffer_bits);
+
+ /*
+	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance counter: %u\n", val);
+ }
+
+ if (psr->debug & I915_PSR_DEBUG_IRQ) {
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ psr->last_entry_attempt);
+ seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
+ }
+
+ if (psr->psr2_enabled) {
+ u32 su_frames_val[3];
+ int frame;
+
+ /*
+		 * Reading all 3 registers beforehand to minimize crossing a
+ * frame boundary between register reads
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = intel_de_read(dev_priv,
+ PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
+ su_frames_val[frame / 3] = val;
+ }
+
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
+
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
+
+ seq_printf(m, "PSR2 selective fetch: %s\n",
+ str_enabled_disabled(psr->psr2_sel_fetch_enabled));
+ }
+
+unlock:
+ mutex_unlock(&psr->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
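+
The SU-status loop above reads one register per three frames and then slices each register apart. A sketch of the decode, assuming the 10-bits-per-frame packing implied by PSR2_SU_STATUS_MASK()/PSR2_SU_STATUS_SHIFT():

static u32 su_blocks_sketch(const u32 regs[3], int frame)
{
	/* three fields per register; frame % 3 selects the field */
	return (regs[frame / 3] >> ((frame % 3) * 10)) & 0x3ff;
}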
+
+static int i915_edp_psr_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct intel_dp *intel_dp = NULL;
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ /* Find the first EDP which supports PSR */
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ intel_dp = enc_to_intel_dp(encoder);
+ break;
+ }
+
+ if (!intel_dp)
+ return -ENODEV;
+
+ return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
+
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
+ intel_wakeref_t wakeref;
+ int ret = -ENODEV;
+
+ if (!HAS_PSR(dev_priv))
+ return ret;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ // TODO: split to each transcoder's PSR debug state
+ ret = intel_psr_debug_set(intel_dp, val);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ }
+
+ return ret;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ // TODO: split to each transcoder's PSR debug state
+ *val = READ_ONCE(intel_dp->psr.debug);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+ i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+ "%llu\n");
+
+void intel_psr_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
+ i915, &i915_edp_psr_debug_fops);
+
+ debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
+ i915, &i915_edp_psr_status_fops);
+}
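+
DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status) generates the i915_edp_psr_status_fops registered above; it is roughly equivalent to wiring up single_open() by hand:

static int sketch_open(struct inode *inode, struct file *file)
{
	/* inode->i_private is the pointer passed to debugfs_create_file() */
	return single_open(file, i915_edp_psr_status_show, inode->i_private);
}

static const struct file_operations sketch_fops = {
	.owner		= THIS_MODULE,
	.open		= sketch_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};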
+
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ static const char * const sink_status[] = {
+ "inactive",
+ "transition to active, capture and display",
+ "active, display from RFB",
+ "active, capture and display on sink device timings",
+ "transition to inactive, capture and display, timing re-sync",
+ "reserved",
+ "reserved",
+ "sink internal error",
+ };
+ const char *str;
+ int ret;
+ u8 val;
+
+ if (!CAN_PSR(intel_dp)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
+
+ if (connector->base.status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+ if (ret != 1)
+ return ret < 0 ? ret : -EIO;
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ else
+ str = "unknown";
+
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static int i915_psr_status_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
+
+void intel_psr_connector_debugfs_add(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct dentry *root = connector->base.debugfs_entry;
+
+ if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
+ return;
+
+ debugfs_create_file("i915_psr_sink_status", 0444, root,
+ connector, &i915_psr_sink_status_fops);
+
+ if (HAS_PSR(i915))
+ debugfs_create_file("i915_psr_status", 0444, root,
+ connector, &i915_psr_status_fops);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 2ac3a46cccc5..0b95e8aa615f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -13,6 +13,7 @@ struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dp;
@@ -46,16 +47,22 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
-void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane);
-void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
+void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane);
+void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
+void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
+void intel_psr_connector_debugfs_add(struct intel_connector *connector);
+void intel_psr_debugfs_register(struct drm_i915_private *i915);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index c65c771f5c46..1cfb94b5cedb 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+static const struct intel_mpllb_state dg2_hdmi_267300 = {
+ .clock = 267300,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_268500 = {
.clock = 268500,
.ref_control =
@@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
+static const struct intel_mpllb_state dg2_hdmi_319890 = {
+ .clock = 319890,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_497750 = {
.clock = 497750,
.ref_control =
@@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_209800,
&dg2_hdmi_241500,
&dg2_hdmi_262750,
+ &dg2_hdmi_267300,
&dg2_hdmi_268500,
&dg2_hdmi_296703,
+ &dg2_hdmi_319890,
&dg2_hdmi_497750,
&dg2_hdmi_592000,
&dg2_hdmi_593407,
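
The two new MPLLB entries extend the clock-sorted dg2_hdmi_tables[]; the PHY code is assumed to select an entry by exact port-clock match along these lines:

static const struct intel_mpllb_state *
mpllb_find_sketch(const struct intel_mpllb_state * const *tables, int clock)
{
	int i;

	/* table is assumed NULL-terminated */
	for (i = 0; tables[i]; i++) {
		if (tables[i]->clock == clock)
			return tables[i];
	}

	return NULL;	/* clock not supported */
}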
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index e6b4d24b9cd0..25034bbf1445 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -32,85 +32,20 @@
#include <linux/string_helpers.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "intel_atomic_plane.h"
-#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
-#include "intel_frontbuffer.h"
#include "intel_sprite.h"
-#include "intel_vrr.h"
-
-int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_rect *src = &plane_state->uapi.src;
- u32 src_x, src_y, src_w, src_h, hsub, vsub;
- bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
-
- /*
- * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
- * abuses hsub/vsub so we can't use them here. But as they
- * are limited to 32bpp RGB formats we don't actually need
- * to check anything.
- */
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
- return 0;
-
- /*
- * Hardware doesn't handle subpixel coordinates.
- * Adjust to (macro)pixel boundary, but be careful not to
- * increase the source viewport size, because that could
- * push the downscaling factor out of bounds.
- */
- src_x = src->x1 >> 16;
- src_w = drm_rect_width(src) >> 16;
- src_y = src->y1 >> 16;
- src_h = drm_rect_height(src) >> 16;
-
- drm_rect_init(src, src_x << 16, src_y << 16,
- src_w << 16, src_h << 16);
-
- if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
- hsub = 2;
- vsub = 2;
- } else {
- hsub = fb->format->hsub;
- vsub = fb->format->vsub;
- }
-
- if (rotated)
- hsub = vsub = max(hsub, vsub);
-
- if (src_x % hsub || src_w % hsub) {
- drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
- src_x, src_w, hsub, str_yes_no(rotated));
- return -EINVAL;
- }
-
- if (src_y % vsub || src_h % vsub) {
- drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
- src_y, src_h, vsub, str_yes_no(rotated));
- return -EINVAL;
- }
-
- return 0;
-}
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
@@ -1217,7 +1152,8 @@ g4x_sprite_update_arm(struct intel_plane *plane,
}
intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
- intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DVSTILEOFF(pipe),
+ DVS_OFFSET_Y(y) | DVS_OFFSET_X(x));
/*
* The control register self-arms if the plane was previously
@@ -1448,124 +1384,6 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
-static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
-{
- return DISPLAY_VER(dev_priv) >= 9;
-}
-
-static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
- const struct drm_intel_sprite_colorkey *set)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
-
- *key = *set;
-
- /*
- * We want src key enabled on the
- * sprite and not on the primary.
- */
- if (plane->id == PLANE_PRIMARY &&
- set->flags & I915_SET_COLORKEY_SOURCE)
- key->flags = 0;
-
- /*
- * On SKL+ we want dst key enabled on
- * the primary and not on the sprite.
- */
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
- set->flags & I915_SET_COLORKEY_DESTINATION)
- key->flags = 0;
-}
-
-int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_intel_sprite_colorkey *set = data;
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
- struct drm_atomic_state *state;
- struct drm_modeset_acquire_ctx ctx;
- int ret = 0;
-
- /* ignore the pointless "none" flag */
- set->flags &= ~I915_SET_COLORKEY_NONE;
-
- if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
- return -EINVAL;
-
- /* Make sure we don't try to enable both src & dest simultaneously */
- if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
- return -EINVAL;
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- set->flags & I915_SET_COLORKEY_DESTINATION)
- return -EINVAL;
-
- plane = drm_plane_find(dev, file_priv, set->plane_id);
- if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
- return -ENOENT;
-
- /*
- * SKL+ only plane 2 can do destination keying against plane 1.
- * Also multiple planes can't do destination keying on the same
- * pipe simultaneously.
- */
- if (DISPLAY_VER(dev_priv) >= 9 &&
- to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
- set->flags & I915_SET_COLORKEY_DESTINATION)
- return -EINVAL;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- state = drm_atomic_state_alloc(plane->dev);
- if (!state) {
- ret = -ENOMEM;
- goto out;
- }
- state->acquire_ctx = &ctx;
-
- while (1) {
- plane_state = drm_atomic_get_plane_state(state, plane);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret)
- intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
-
- /*
- * On some platforms we have to configure
- * the dst colorkey on the primary plane.
- */
- if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
- struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv,
- to_intel_plane(plane)->pipe);
-
- plane_state = drm_atomic_get_plane_state(state,
- crtc->base.primary);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret)
- intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
- }
-
- if (!ret)
- ret = drm_atomic_commit(state);
-
- if (ret != -EDEADLK)
- break;
-
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- }
-
- drm_atomic_state_put(state);
-out:
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- return ret;
-}
-
static const u32 g4x_sprite_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
new file mode 100644
index 000000000000..70a391083751
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_crtc.h"
+#include "intel_display_types.h"
+#include "intel_sprite_uapi.h"
+
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+{
+ return DISPLAY_VER(dev_priv) >= 9;
+}
+
+static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
+ const struct drm_intel_sprite_colorkey *set)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+ *key = *set;
+
+ /*
+ * We want src key enabled on the
+ * sprite and not on the primary.
+ */
+ if (plane->id == PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_SOURCE)
+ key->flags = 0;
+
+ /*
+ * On SKL+ we want dst key enabled on
+ * the primary and not on the sprite.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ key->flags = 0;
+}
+
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret = 0;
+
+ /* ignore the pointless "none" flag */
+ set->flags &= ~I915_SET_COLORKEY_NONE;
+
+ if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+ if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ plane = drm_plane_find(dev, file_priv, set->plane_id);
+ if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return -ENOENT;
+
+ /*
+ * SKL+ only plane 2 can do destination keying against plane 1.
+ * Also multiple planes can't do destination keying on the same
+ * pipe simultaneously.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 &&
+ to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ state->acquire_ctx = &ctx;
+
+ while (1) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+
+ /*
+ * On some platforms we have to configure
+ * the dst colorkey on the primary plane.
+ */
+ if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv,
+ to_intel_plane(plane)->pipe);
+
+ plane_state = drm_atomic_get_plane_state(state,
+ crtc->base.primary);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+ }
+
+ if (!ret)
+ ret = drm_atomic_commit(state);
+
+ if (ret != -EDEADLK)
+ break;
+
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ }
+
+ drm_atomic_state_put(state);
+out:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return ret;
+}
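+
The moved ioctl keeps the standard DRM acquire-context retry loop: on -EDEADLK every lock is dropped via drm_modeset_backoff() and the whole update is retried. The bare pattern, with do_update() as a hypothetical stand-in for the state building and commit above:

static int modeset_retry_sketch(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = do_update(dev, &ctx);		/* hypothetical helper */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);	/* drop locks, wait, retry */
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}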
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.h b/drivers/gpu/drm/i915/display/intel_sprite_uapi.h
new file mode 100644
index 000000000000..3eb50025acaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_SPRITE_UAPI_H__
+#define __INTEL_SPRITE_UAPI_H__
+
+struct drm_device;
+struct drm_file;
+
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#endif /* __INTEL_SPRITE_UAPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index f45328712bff..bd8c9df5f98f 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power_map.h"
@@ -118,6 +119,24 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port)
drm_WARN_ON(&i915->drm, !enabled);
}
+static enum intel_display_power_domain
+tc_port_power_domain(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+
+ return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
+}
+
+static void
+assert_tc_port_power_enabled(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ drm_WARN_ON(&i915->drm,
+ !intel_display_power_is_enabled(i915, tc_port_power_domain(dig_port)));
+}
+
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -418,9 +437,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
- "Port %s: PHY in TCCOLD, assume safe mode\n",
+ "Port %s: PHY in TCCOLD, assume not owned\n",
dig_port->tc_port_name);
- return true;
+ return false;
}
return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
@@ -464,7 +483,8 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
u32 live_status_mask;
int max_lanes;
- if (!tc_phy_status_complete(dig_port)) {
+ if (!tc_phy_status_complete(dig_port) &&
+ !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
dig_port->tc_port_name);
goto out_set_tbt_alt_mode;
@@ -539,62 +559,171 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
}
}
-static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
+static bool tc_phy_is_ready_and_owned(struct intel_digital_port *dig_port,
+ bool phy_is_ready, bool phy_is_owned)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- if (!tc_phy_status_complete(dig_port)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
- dig_port->tc_port_name);
- return dig_port->tc_mode == TC_PORT_TBT_ALT;
- }
+ drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
+
+ return phy_is_ready && phy_is_owned;
+}
- /* On ADL-P the PHY complete flag is set in TBT mode as well. */
- if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
- return true;
+static bool tc_phy_is_connected(struct intel_digital_port *dig_port,
+ enum icl_port_dpll_id port_pll_type)
+{
+ struct intel_encoder *encoder = &dig_port->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ bool phy_is_ready = tc_phy_status_complete(dig_port);
+ bool phy_is_owned = tc_phy_is_owned(dig_port);
+ bool is_connected;
- if (!tc_phy_is_owned(dig_port)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
- dig_port->tc_port_name);
+ if (tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned))
+ is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
+ else
+ is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
- return false;
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
+ dig_port->tc_port_name,
+ str_yes_no(is_connected),
+ str_yes_no(phy_is_ready),
+ str_yes_no(phy_is_owned),
+ port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
+
+ return is_connected;
+}
+
+static void tc_phy_wait_for_ready(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (wait_for(tc_phy_status_complete(dig_port), 100))
+ drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
+ dig_port->tc_port_name);
+}
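+
wait_for() above polls its condition until it becomes true or the millisecond timeout expires. A plain polling loop with the same observable behavior, making no assumption about the real macro's backoff strategy:

static bool wait_for_ready_sketch(struct intel_digital_port *dig_port,
				  unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if (tc_phy_status_complete(dig_port))
			return true;
		if (time_after(jiffies, timeout))
			return false;
		usleep_range(1000, 2000);	/* arbitrary poll interval */
	}
}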
+
+static enum tc_port_mode
+hpd_mask_to_tc_mode(u32 live_status_mask)
+{
+ if (live_status_mask)
+ return fls(live_status_mask) - 1;
+
+ return TC_PORT_DISCONNECTED;
+}
+
+static enum tc_port_mode
+tc_phy_hpd_live_mode(struct intel_digital_port *dig_port)
+{
+ u32 live_status_mask = tc_port_live_status_mask(dig_port);
+
+ return hpd_mask_to_tc_mode(live_status_mask);
+}
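+
hpd_mask_to_tc_mode() relies on the live-status mask being indexed by enum tc_port_mode: fls() returns the position of the highest set bit plus one, so the highest-numbered mode reporting a hotplug wins, and an empty mask means disconnected. Standalone illustration:

static int highest_mode_sketch(u32 live_status_mask)
{
	if (!live_status_mask)
		return -1;	/* stands in for TC_PORT_DISCONNECTED */

	return fls(live_status_mask) - 1;
}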
+
+static enum tc_port_mode
+get_tc_mode_in_phy_owned_state(struct intel_digital_port *dig_port,
+ enum tc_port_mode live_mode)
+{
+ switch (live_mode) {
+ case TC_PORT_LEGACY:
+ case TC_PORT_DP_ALT:
+ return live_mode;
+ default:
+ MISSING_CASE(live_mode);
+ fallthrough;
+ case TC_PORT_TBT_ALT:
+ case TC_PORT_DISCONNECTED:
+ if (dig_port->tc_legacy_port)
+ return TC_PORT_LEGACY;
+ else
+ return TC_PORT_DP_ALT;
}
+}
- return dig_port->tc_mode == TC_PORT_DP_ALT ||
- dig_port->tc_mode == TC_PORT_LEGACY;
+static enum tc_port_mode
+get_tc_mode_in_phy_not_owned_state(struct intel_digital_port *dig_port,
+ enum tc_port_mode live_mode)
+{
+ switch (live_mode) {
+ case TC_PORT_LEGACY:
+ return TC_PORT_DISCONNECTED;
+ case TC_PORT_DP_ALT:
+ case TC_PORT_TBT_ALT:
+ return TC_PORT_TBT_ALT;
+ default:
+ MISSING_CASE(live_mode);
+ fallthrough;
+ case TC_PORT_DISCONNECTED:
+ if (dig_port->tc_legacy_port)
+ return TC_PORT_DISCONNECTED;
+ else
+ return TC_PORT_TBT_ALT;
+ }
}
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- u32 live_status_mask = tc_port_live_status_mask(dig_port);
+ enum tc_port_mode live_mode = tc_phy_hpd_live_mode(dig_port);
+ bool phy_is_ready;
+ bool phy_is_owned;
enum tc_port_mode mode;
- if (!tc_phy_is_owned(dig_port) ||
- drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
- return TC_PORT_TBT_ALT;
+ /*
+ * For legacy ports the IOM firmware initializes the PHY during boot-up
+ * and system resume whether or not a sink is connected. Wait here for
+	 * the initialization to complete.
+ */
+ if (dig_port->tc_legacy_port)
+ tc_phy_wait_for_ready(dig_port);
- mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
- if (live_status_mask) {
- enum tc_port_mode live_mode = fls(live_status_mask) - 1;
+ phy_is_ready = tc_phy_status_complete(dig_port);
+ phy_is_owned = tc_phy_is_owned(dig_port);
- if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
- mode = live_mode;
+ if (!tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned)) {
+ mode = get_tc_mode_in_phy_not_owned_state(dig_port, live_mode);
+ } else {
+ drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
+ mode = get_tc_mode_in_phy_owned_state(dig_port, live_mode);
}
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(mode),
+ str_yes_no(phy_is_ready),
+ str_yes_no(phy_is_owned),
+ tc_port_mode_name(live_mode));
+
return mode;
}
+static enum tc_port_mode default_tc_mode(struct intel_digital_port *dig_port)
+{
+ if (dig_port->tc_legacy_port)
+ return TC_PORT_LEGACY;
+
+ return TC_PORT_TBT_ALT;
+}
+
+static enum tc_port_mode
+hpd_mask_to_target_mode(struct intel_digital_port *dig_port, u32 live_status_mask)
+{
+ enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
+
+ if (mode != TC_PORT_DISCONNECTED)
+ return mode;
+
+ return default_tc_mode(dig_port);
+}
+
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
u32 live_status_mask = tc_port_live_status_mask(dig_port);
- if (live_status_mask)
- return fls(live_status_mask) - 1;
-
- return TC_PORT_TBT_ALT;
+ return hpd_mask_to_target_mode(dig_port, live_status_mask);
}
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
@@ -660,11 +789,24 @@ static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
tc_cold_unblock(dig_port, domain, wref);
}
-static void
-intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
- int refcount)
+static void __intel_tc_port_get_link(struct intel_digital_port *dig_port)
+{
+ dig_port->tc_link_refcount++;
+}
+
+static void __intel_tc_port_put_link(struct intel_digital_port *dig_port)
+{
+ dig_port->tc_link_refcount--;
+}
+
+static bool tc_port_is_enabled(struct intel_digital_port *dig_port)
{
- dig_port->tc_link_refcount = refcount;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ assert_tc_port_power_enabled(dig_port);
+
+ return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
+ DDI_BUF_CTL_ENABLE;
}
/**
@@ -679,6 +821,7 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
intel_wakeref_t tc_cold_wref;
enum intel_display_power_domain domain;
+ bool update_mode = false;
mutex_lock(&dig_port->tc_lock);
@@ -689,63 +832,105 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
tc_cold_wref = tc_cold_block(dig_port, &domain);
dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ /*
+ * Save the initial mode for the state check in
+ * intel_tc_port_sanitize_mode().
+ */
+ dig_port->tc_init_mode = dig_port->tc_mode;
+ if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
+ dig_port->tc_lock_wakeref =
+ tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
+
+ /*
+ * The PHY needs to be connected for AUX to work during HW readout and
+ * MST topology resume, but the PHY mode can only be changed if the
+ * port is disabled.
+ *
+ * An exception is the case where BIOS leaves the PHY incorrectly
+ * disconnected on an enabled legacy port. Work around that by
+ * connecting the PHY even though the port is enabled. This doesn't
+ * cause a problem as the PHY ownership state is ignored by the
+ * IOM/TCSS firmware (only display can own the PHY in that case).
+ */
+ if (!tc_port_is_enabled(dig_port)) {
+ update_mode = true;
+ } else if (dig_port->tc_mode == TC_PORT_DISCONNECTED) {
+ drm_WARN_ON(&i915->drm, !dig_port->tc_legacy_port);
+ drm_err(&i915->drm,
+ "Port %s: PHY disconnected on enabled port, connecting it\n",
+ dig_port->tc_port_name);
+ update_mode = true;
+ }
+
+ if (update_mode)
+ intel_tc_port_update_mode(dig_port, 1, false);
+
/* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */
- intel_tc_port_link_init_refcount(dig_port, 1);
- dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
+ __intel_tc_port_get_link(dig_port);
tc_cold_unblock(dig_port, domain, tc_cold_wref);
- drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n",
- dig_port->tc_port_name,
- tc_port_mode_name(dig_port->tc_mode));
-
mutex_unlock(&dig_port->tc_lock);
}
+static bool tc_port_has_active_links(struct intel_digital_port *dig_port,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
+ int active_links = 0;
+
+ if (dig_port->dp.is_mst) {
+ /* TODO: get the PLL type for MST, once HW readout is done for it. */
+ active_links = intel_dp_mst_encoder_active_links(dig_port);
+ } else if (crtc_state && crtc_state->hw.active) {
+ pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
+ active_links = 1;
+ }
+
+ if (active_links && !tc_phy_is_connected(dig_port, pll_type))
+ drm_err(&i915->drm,
+ "Port %s: PHY disconnected with %d active link(s)\n",
+ dig_port->tc_port_name, active_links);
+
+ return active_links;
+}
+
/**
* intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
* @dig_port: digital port
+ * @crtc_state: atomic state of CRTC connected to @dig_port
*
* Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
* loading and system resume:
* If the encoder is enabled keep the TypeC mode/PHY connected state locked until
* the encoder is disabled.
* If the encoder is disabled make sure the PHY is disconnected.
+ * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
*/
-void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
+void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_encoder *encoder = &dig_port->base;
- int active_links = 0;
mutex_lock(&dig_port->tc_lock);
- if (dig_port->dp.is_mst)
- active_links = intel_dp_mst_encoder_active_links(dig_port);
- else if (encoder->base.crtc)
- active_links = to_intel_crtc(encoder->base.crtc)->active;
-
drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1);
- intel_tc_port_link_init_refcount(dig_port, active_links);
-
- if (active_links) {
- if (!icl_tc_phy_is_connected(dig_port))
- drm_dbg_kms(&i915->drm,
- "Port %s: PHY disconnected with %d active link(s)\n",
- dig_port->tc_port_name, active_links);
- } else {
+ if (!tc_port_has_active_links(dig_port, crtc_state)) {
/*
* TBT-alt is the default mode whenever the PHY ownership is not
* held (regardless of the sink's live connection state), so
* we'll just switch to disconnected mode from it here without
* a note.
*/
- if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (dig_port->tc_init_mode != TC_PORT_TBT_ALT &&
+ dig_port->tc_init_mode != TC_PORT_DISCONNECTED)
drm_dbg_kms(&i915->drm,
"Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
dig_port->tc_port_name,
- tc_port_mode_name(dig_port->tc_mode));
+ tc_port_mode_name(dig_port->tc_init_mode));
icl_tc_phy_disconnect(dig_port);
+ __intel_tc_port_put_link(dig_port);
tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
fetch_and_zero(&dig_port->tc_lock_wakeref));
@@ -768,16 +953,23 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
* connected ports are usable, and avoids exposing to the users objects they
* can't really use.
*/
+bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
+
+ return tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode);
+}
+
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_connected;
intel_tc_port_lock(dig_port);
-
- is_connected = tc_port_live_status_mask(dig_port) &
- BIT(dig_port->tc_mode);
-
+ is_connected = intel_tc_port_connected_locked(encoder);
intel_tc_port_unlock(dig_port);
return is_connected;
@@ -857,14 +1049,14 @@ void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes)
{
__intel_tc_port_lock(dig_port, required_lanes);
- dig_port->tc_link_refcount++;
+ __intel_tc_port_get_link(dig_port);
intel_tc_port_unlock(dig_port);
}
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
intel_tc_port_lock(dig_port);
- --dig_port->tc_link_refcount;
+ __intel_tc_port_put_link(dig_port);
intel_tc_port_unlock(dig_port);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index d54082e2d5e8..79667d977508 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -9,6 +9,7 @@
#include <linux/mutex.h>
#include <linux/types.h>
+struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
@@ -17,6 +18,7 @@ bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_connected(struct intel_encoder *encoder);
+bool intel_tc_port_connected_locked(struct intel_encoder *encoder);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
@@ -25,7 +27,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
void intel_tc_port_init_mode(struct intel_digital_port *dig_port);
-void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port);
+void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
+ const struct intel_crtc_state *crtc_state);
void intel_tc_port_lock(struct intel_digital_port *dig_port);
void intel_tc_port_unlock(struct intel_digital_port *dig_port);
void intel_tc_port_flush_work(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index b986bf075889..3b5ff84dc615 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -930,8 +930,7 @@ intel_enable_tv(struct intel_atomic_state *state,
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
- intel_de_write(dev_priv, TV_CTL,
- intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
+ intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE);
}
static void
@@ -943,8 +942,7 @@ intel_disable_tv(struct intel_atomic_state *state,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- intel_de_write(dev_priv, TV_CTL,
- intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE);
+ intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0);
}
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 4c83e2320bca..f8bf9810527d 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -8,6 +8,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
+#include "intel_vrr.h"
/*
* This timing diagram depicts the video signal in and
@@ -26,7 +27,7 @@
* |
* | frame start:
* | generate frame start interrupt (aka. vblank interrupt) (gmch)
- * | may be shifted forward 1-3 extra lines via PIPECONF
+ * | may be shifted forward 1-3 extra lines via TRANSCONF
* | |
* | | start of vsync:
* | | generate vsync interrupt
@@ -54,7 +55,7 @@
* Summary:
* - most events happen at the start of horizontal sync
* - frame start happens at the start of horizontal blank, 1-4 lines
- * (depending on PIPECONF settings) after the start of vblank
+ * (depending on TRANSCONF settings) after the start of vblank
* - gen3/4 pixel and frame counter are synchronized with the start
* of horizontal active on the first line of vertical active
*/
@@ -439,3 +440,94 @@ void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
wait_for_pipe_scanline_moving(crtc, true);
}
+
+static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ /*
+ * The scanline counter increments at the leading edge of hsync.
+ *
+ * On most platforms it starts counting from vtotal-1 on the
+ * first active line. That means the scanline counter value is
+ * always one less than what we would expect. Ie. just after
+ * start of vblank, which also occurs at start of hsync (on the
+ * last active line), the scanline counter will read vblank_start-1.
+ *
+ * On gen2 the scanline counter starts counting from 1 instead
+ * of vtotal-1, so we have to subtract one (or rather add vtotal-1
+ * to keep the value positive), instead of adding one.
+ *
+ * On HSW+ the behaviour of the scanline counter depends on the output
+ * type. For DP ports it behaves like most other platforms, but on HDMI
+ * there's an extra 1 line difference. So we need to add two instead of
+ * one to the value.
+ *
+ * On VLV/CHV DSI the scanline counter would appear to increment
+ * approx. 1/3 of a scanline before start of vblank. Unfortunately
+ * that means we can't tell whether we're in vblank or not while
+ * we're on that particular line. We must still set scanline_offset
+ * to 1 so that the vblank timestamps come out correct when we query
+ * the scanline counter from within the vblank interrupt handler.
+ * However if queried just before the start of vblank we'll get an
+ * answer that's slightly in the future.
+ */
+ if (DISPLAY_VER(i915) == 2) {
+ int vtotal;
+
+ vtotal = adjusted_mode->crtc_vtotal;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ return vtotal - 1;
+ } else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ return 2;
+ } else {
+ return 1;
+ }
+}
+
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_display_mode adjusted_mode;
+ int vmax_vblank_start = 0;
+ unsigned long irqflags;
+
+ drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
+
+ if (crtc_state->vrr.enable) {
+ adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ }
+
+ /*
+ * Belts and suspenders locking to guarantee everyone sees 100%
+ * consistent state during fastset seamless refresh rate changes.
+ *
+ * vblank_time_lock takes care of all drm_vblank.c stuff, and
+ * uncore.lock takes care of __intel_get_crtc_scanline() which
+ * may get called elsewhere as well.
+ *
+ * TODO maybe just protect everything (including
+ * __intel_get_crtc_scanline()) with vblank_time_lock?
+ * Need to audit everything to make sure it's safe.
+ */
+ spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags);
+ spin_lock(&i915->uncore.lock);
+
+ drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
+
+ crtc->vmax_vblank_start = vmax_vblank_start;
+
+ crtc->mode_flags = crtc_state->mode_flags;
+
+ crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
+
+ spin_unlock(&i915->uncore.lock);
+ spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index c9fea2c2a990..0884db7e76ae 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -11,6 +11,7 @@
struct drm_crtc;
struct intel_crtc;
+struct intel_crtc_state;
u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
@@ -19,5 +20,6 @@ bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc);
void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc);
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VBLANK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 207b2a648d32..09b32ffdc552 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -17,6 +17,7 @@
#include "intel_dsi.h"
#include "intel_qp_tables.h"
#include "intel_vdsc.h"
+#include "intel_vdsc_regs.h"
enum ROW_INDEX_BPP {
ROW_INDEX_6BPP = 0,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
new file mode 100644
index 000000000000..4fd883463752
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -0,0 +1,461 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_VDSC_REGS_H__
+#define __INTEL_VDSC_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* Display Stream Splitter Control */
+#define DSS_CTL1 _MMIO(0x67400)
+#define SPLITTER_ENABLE (1 << 31)
+#define JOINER_ENABLE (1 << 30)
+#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
+#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
+#define OVERLAP_PIXELS_MASK (0xf << 16)
+#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
+#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
+
+#define DSS_CTL2 _MMIO(0x67404)
+#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
+#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
+#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+
+#define _ICL_PIPE_DSS_CTL1_PB 0x78200
+#define _ICL_PIPE_DSS_CTL1_PC 0x78400
+#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL1_PB, \
+ _ICL_PIPE_DSS_CTL1_PC)
+#define BIG_JOINER_ENABLE (1 << 29)
+#define MASTER_BIG_JOINER_ENABLE (1 << 28)
+#define VGA_CENTERING_ENABLE (1 << 27)
+#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
+#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
+#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
+#define UNCOMPRESSED_JOINER_MASTER (1 << 21)
+#define UNCOMPRESSED_JOINER_SLAVE (1 << 20)
+
+#define _ICL_PIPE_DSS_CTL2_PB 0x78204
+#define _ICL_PIPE_DSS_CTL2_PC 0x78404
+#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL2_PB, \
+ _ICL_PIPE_DSS_CTL2_PC)
+
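A note on the _MMIO_PIPE((pipe) - PIPE_B, ...) pattern used throughout this file: only the pipe B and pipe C register instances are spelled out, and the offset for any other pipe is extrapolated linearly from that pair. A hedged sketch of the arithmetic (assuming the usual _PICK_EVEN()-style expansion of _MMIO_PIPE):

        /* Illustration only: derive a per-pipe DSS_CTL1 register offset. */
        static u32 example_pipe_dss_ctl1_offset(int pipe_index /* pipe - PIPE_B */)
        {
                const u32 base = 0x78200;               /* _ICL_PIPE_DSS_CTL1_PB */
                const u32 stride = 0x78400 - 0x78200;   /* PC instance - PB instance */

                return base + pipe_index * stride;      /* 0x78200, 0x78400, ... */
        }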
+/* Icelake Display Stream Compression Registers */
+#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200)
+#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
+#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define DSC_ALT_ICH_SEL (1 << 20)
+#define DSC_VBR_ENABLE (1 << 19)
+#define DSC_422_ENABLE (1 << 18)
+#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
+#define DSC_BLOCK_PREDICTION (1 << 16)
+#define DSC_LINE_BUF_DEPTH_SHIFT 12
+#define DSC_BPC_SHIFT 8
+#define DSC_VER_MIN_SHIFT 4
+#define DSC_VER_MAJ (0x1 << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204)
+#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
+#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
+#define DSC_BPP(bpp) ((bpp) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208)
+#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
+#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
+#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
+#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C)
+#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
+#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
+#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210)
+#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
+#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
+#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
+#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214)
+#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
+#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
+#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16)
+#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218)
+#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
+#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
+#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
+#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
+#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
+#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C)
+#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
+#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
+#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220)
+#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
+#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
+#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
+#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224)
+#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
+#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
+#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
+#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228)
+#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
+#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
+#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
+#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
+#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
+#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C)
+#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260)
+#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264)
+#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
+#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268)
+#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
+#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C)
+#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC
+#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270)
+#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70)
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
+#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
+#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
+
+/* Icelake Rate Control Buffer Threshold Registers */
+#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
+#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
+#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30)
+#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4)
+#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_PC)
+#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
+
+#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238)
+#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4)
+#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38)
+#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4)
+#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_PC)
+#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
+ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
+
+/* Icelake DSC Rate Control Range Parameter Registers */
+#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
+#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
+#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define RC_BPG_OFFSET_SHIFT 10
+#define RC_MAX_QP_SHIFT 5
+#define RC_MIN_QP_SHIFT 0
+
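The three shifts above define how a single rate-control range entry is laid out; a minimal packing sketch (the 6/5/5-bit field widths are an assumption inferred from the shift spacing):

        /* Illustration only: pack one DSC RC range entry. */
        static u16 example_pack_rc_range_entry(u16 bpg_offset, u16 max_qp, u16 min_qp)
        {
                return (bpg_offset << RC_BPG_OFFSET_SHIFT) |    /* bits 15:10 */
                       (max_qp << RC_MAX_QP_SHIFT) |            /* bits 9:5 */
                       (min_qp << RC_MIN_QP_SHIFT);             /* bits 4:0 */
        }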
+#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
+#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
+#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
+#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
+#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
+#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
+#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
+ _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
+
+#endif /* __INTEL_VDSC_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5ff6aed9575e..4228f26b4c11 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -144,17 +144,11 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
* is deprecated.
*/
if (DISPLAY_VER(i915) >= 13) {
- /*
- * FIXME: Subtract Window2 delay from below value.
- *
- * Window2 specifies time required to program DSB (Window2) in
- * number of scan lines. Assuming 0 for no DSB.
- */
crtc_state->vrr.guardband =
- crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vdisplay;
+ crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start;
} else {
crtc_state->vrr.pipeline_full =
- min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay -
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start -
crtc_state->framestart_delay - 1);
}
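A worked example of the updated math (illustrative numbers only): with vrr.vmin == 1125 and crtc_vblank_start == 1084, the DISPLAY_VER >= 13 branch yields guardband = 1125 + 1 - 1084 = 42 scanlines, while the older branch with framestart_delay == 1 gives pipeline_full = min(255, 1125 - 1084 - 1 - 1) = 39.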
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
new file mode 100644
index 000000000000..bb99179cd5fd
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i9xx_wm.h"
+#include "intel_display_types.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ * @dev_priv: i915 device
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2 lines)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ *   lines), so we need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_i915_private *i915)
+{
+ if (i915->display.funcs.wm->update_wm)
+ i915->display.funcs.wm->update_wm(i915);
+}
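To make the doc comment above concrete, a rough worked example of the normal method (illustrative numbers, not from any real platform): at a 148.5 MHz dotclock, 4 bytes per pixel and 30 us latency, the FIFO must absorb roughly 148,500,000 * 4 * 0.000030 = 17,820 bytes; rounding up and adding the 2 extra entries for clock crossings gives the value to program.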
+
+int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->compute_pipe_wm)
+ return i915->display.funcs.wm->compute_pipe_wm(state, crtc);
+
+ return 0;
+}
+
+int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (!i915->display.funcs.wm->compute_intermediate_wm)
+ return 0;
+
+ if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm))
+ return 0;
+
+ return i915->display.funcs.wm->compute_intermediate_wm(state, crtc);
+}
+
+bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->initial_watermarks) {
+ i915->display.funcs.wm->initial_watermarks(state, crtc);
+ return true;
+ }
+
+ return false;
+}
+
+void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->atomic_update_watermarks)
+ i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
+}
+
+void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->optimize_watermarks)
+ i915->display.funcs.wm->optimize_watermarks(state, crtc);
+}
+
+int intel_compute_global_watermarks(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (i915->display.funcs.wm->compute_global_watermarks)
+ return i915->display.funcs.wm->compute_global_watermarks(state);
+
+ return 0;
+}
+
+void intel_wm_get_hw_state(struct drm_i915_private *i915)
+{
+ if (i915->display.funcs.wm->get_hw_state)
+ return i915->display.funcs.wm->get_hw_state(i915);
+}
+
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ /* FIXME check the 'enable' instead */
+ if (!crtc_state->hw.active)
+ return false;
+
+ /*
+ * Treat cursor with fb as always visible since cursor updates
+ * can happen faster than the vrefresh rate, and the current
+ * watermark code doesn't handle that correctly. Cursor updates
+ * which set/clear the fb or change the cursor size are going
+ * to get throttled by intel_legacy_cursor_update() to work
+ * around this problem with the watermark code.
+ */
+ if (plane->id == PLANE_CURSOR)
+ return plane_state->hw.fb != NULL;
+ else
+ return plane_state->uapi.visible;
+}
+
+void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ const char *name, const u16 wm[])
+{
+ int level;
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ if (latency == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency not provided\n",
+ name, level);
+ continue;
+ }
+
+ /*
+ * - latencies are in us on gen9.
+ * - before then, WM1+ latency values are in 0.5us units
+ */
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s WM%d latency %u (%u.%u usec)\n", name, level,
+ wm[level], latency / 10, latency % 10);
+ }
+}
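For example, on a pre-gen9 platform a raw WM1 value of 9 is in 0.5 us units, so the loop above scales it to 9 * 5 = 45 tenths and prints "4.5 usec"; on gen9+ the same raw value is already in microseconds and prints as "9.0 usec".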
+
+void intel_wm_init(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 9)
+ skl_wm_init(i915);
+ else
+ i9xx_wm_init(i915);
+}
+
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
+{
+ struct drm_i915_private *dev_priv = m->private;
+ int level;
+
+ drm_modeset_lock_all(&dev_priv->drm);
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ /*
+ * - WM1+ latency values in 0.5us units
+ * - latencies are in us on gen9/vlv/chv
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 ||
+ IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_G4X(dev_priv))
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level], latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(&dev_priv->drm);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.pri_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.spr_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.cur_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev_priv);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev_priv);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev_priv);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, u16 wm[8])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 new[8] = { 0 };
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
+ if (ret != dev_priv->display.wm.num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(&dev_priv->drm);
+
+ for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(&dev_priv->drm);
+
+ return len;
+}
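Usage note (hypothetical values): the write path above expects exactly display.wm.num_levels space-separated integers, e.g. "2 4 8 16 24 32 40 48" on an eight-level platform; any other count is rejected with -EINVAL.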
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.cur_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
+void intel_wm_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_pri_wm_latency_fops);
+
+ debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_spr_wm_latency_fops);
+
+ debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
+ i915, &i915_cur_wm_latency_fops);
+
+ skl_watermark_debugfs_register(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
new file mode 100644
index 000000000000..48429ac140d2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_WM_H__
+#define __INTEL_WM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+void intel_update_watermarks(struct drm_i915_private *i915);
+int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_compute_global_watermarks(struct intel_atomic_state *state);
+void intel_wm_get_hw_state(struct drm_i915_private *i915);
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_print_wm_latency(struct drm_i915_private *i915,
+ const char *name, const u16 wm[]);
+void intel_wm_init(struct drm_i915_private *i915);
+void intel_wm_debugfs_register(struct drm_i915_private *i915);
+
+#endif /* __INTEL_WM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/display/intel_wm_types.h
index 93152537b420..628b7c0ce484 100644
--- a/drivers/gpu/drm/i915/intel_pm_types.h
+++ b/drivers/gpu/drm/i915/display/intel_wm_types.h
@@ -3,12 +3,12 @@
* Copyright © 2021 Intel Corporation
*/
-#ifndef __INTEL_PM_TYPES_H__
-#define __INTEL_PM_TYPES_H__
+#ifndef __INTEL_WM_TYPES_H__
+#define __INTEL_WM_TYPES_H__
#include <linux/types.h>
-#include "display/intel_display_limits.h"
+#include "intel_display_limits.h"
enum intel_ddb_partitioning {
INTEL_DDB_PART_1_2,
@@ -73,4 +73,4 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
return false;
}
-#endif /* __INTEL_PM_TYPES_H__ */
+#endif /* __INTEL_WM_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 9b172a1e90de..fd0065a46ec5 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -17,7 +17,6 @@
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_psr.h"
-#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -642,7 +641,7 @@ icl_plane_disable_arm(struct intel_plane *plane,
skl_write_plane_wm(plane, crtc_state);
- intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
+ intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
}
@@ -1260,7 +1259,7 @@ icl_plane_update_noarm(struct intel_plane *plane,
if (plane_state->force_black)
icl_plane_csc_load_black(plane);
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+ intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
}
static void
@@ -1287,6 +1286,8 @@ icl_plane_update_arm(struct intel_plane *plane,
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
+ intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state);
+
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
@@ -2180,7 +2181,7 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
if (DISPLAY_VER(i915) < 12)
return false;
- /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
+ /* Wa_14010477008 */
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
return false;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index d1670cc3eff2..50a9a6adbe32 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -5,21 +5,22 @@
#include <drm/drm_blend.h>
+#include "i915_drv.h"
+#include "i915_fixed.h"
+#include "i915_reg.h"
+#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_fb.h"
-#include "skl_watermark.h"
-
-#include "i915_drv.h"
-#include "i915_fixed.h"
-#include "i915_reg.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
static void skl_sagv_disable(struct drm_i915_private *i915);
@@ -64,7 +65,7 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
static bool
intel_has_sagv(struct drm_i915_private *i915)
{
- return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
+ return HAS_SAGV(i915) &&
i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
}
@@ -92,7 +93,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
return val;
} else if (DISPLAY_VER(i915) == 11) {
return 10;
- } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
+ } else if (HAS_SAGV(i915)) {
return 30;
} else {
return 0;
@@ -101,7 +102,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
static void intel_sagv_init(struct drm_i915_private *i915)
{
- if (!intel_has_sagv(i915))
+ if (!HAS_SAGV(i915))
i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
/*
@@ -359,7 +360,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(i915);
+ for (level = i915->display.wm.num_levels - 1;
!wm->wm[level].enable; --level)
{ }
@@ -704,16 +705,38 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
+static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
+ const struct skl_wm_params *wp)
+{
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ if (latency == 0)
+ return 0;
+
+ /*
+ * WaIncreaseLatencyIPCEnabled: kbl,cfl
+ * Display WA #1141: kbl,cfl
+ */
+ if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
+ skl_watermark_ipc_enabled(i915))
+ latency += 4;
+
+ if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
+ latency += 15;
+
+ return latency;
+}
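Worked example of the helper above (hypothetical numbers): a raw skl_latency[] entry of 8 us on KBL with IPC enabled becomes 8 + 4 = 12 us, and if the memory bandwidth workaround applies to an x-tiled plane it becomes 12 + 15 = 27 us.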
+
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(i915);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
+ int level;
ret = skl_compute_wm_params(crtc_state, 256,
drm_format_info(DRM_FORMAT_ARGB8888),
@@ -722,8 +745,8 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
crtc_state->pixel_rate, &wp, 0);
drm_WARN_ON(&i915->drm, ret);
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = i915->display.wm.skl_latency[level];
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
+ unsigned int latency = skl_wm_latency(i915, level, &wp);
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
@@ -1407,16 +1430,22 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
-static bool icl_need_wm1_wa(struct drm_i915_private *i915,
- enum plane_id plane_id)
+static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
+ const struct skl_plane_wm *wm)
{
/*
* Wa_1408961008:icl, ehl
* Wa_14012656716:tgl, adl
- * Underruns with WM1+ disabled
+ * Wa_14017887344:icl
+ * Wa_14017868169:adl, tgl
+ * Due to some power saving optimizations, different subsystems
+ * such as PSR might still use even disabled wm level registers
+ * for "reference", so let's keep at least those values sane.
+ * Considering the number of workarounds requiring similar handling,
+ * it was decided to simply do this for all platforms; since those
+ * wm levels are disabled anyway, it does no harm.
*/
- return DISPLAY_VER(i915) == 11 ||
- (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
+ return level > 0 && !wm->wm[level].enable;
}
struct skl_plane_ddb_iter {
@@ -1492,7 +1521,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* Find the highest watermark level for which we can satisfy the block
* requirement of active planes.
*/
- for (level = ilk_wm_max_level(i915); level >= 0; level--) {
+ for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
@@ -1568,7 +1597,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* all levels as "enabled." Go back now and disable the ones
* that aren't actually possible.
*/
- for (level++; level <= ilk_wm_max_level(i915); level++) {
+ for (level++; level < i915->display.wm.num_levels; level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1585,12 +1614,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
else
skl_check_wm_level(&wm->wm[level], ddb);
- if (icl_need_wm1_wa(i915, plane_id) &&
- level == 1 && !wm->wm[level].enable &&
- wm->wm[0].enable) {
- wm->wm[level].blocks = wm->wm[0].blocks;
- wm->wm[level].lines = wm->wm[0].lines;
- wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ if (skl_need_wm_copy_wa(i915, level, wm)) {
+ wm->wm[level].blocks = wm->wm[level - 1].blocks;
+ wm->wm[level].lines = wm->wm[level - 1].lines;
+ wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
}
}
}
@@ -1835,17 +1862,6 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
return;
}
- /*
- * WaIncreaseLatencyIPCEnabled: kbl,cfl
- * Display WA #1141: kbl,cfl
- */
- if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
- skl_watermark_ipc_enabled(i915))
- latency += 4;
-
- if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
- latency += 15;
-
method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
@@ -1967,12 +1983,12 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
struct skl_wm_level *levels)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(i915);
struct skl_wm_level *result_prev = &levels[0];
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
struct skl_wm_level *result = &levels[level];
- unsigned int latency = i915->display.wm.skl_latency[level];
+ unsigned int latency = skl_wm_latency(i915, level, wm_params);
skl_compute_plane_wm(crtc_state, plane, level, latency,
wm_params, result_prev, result);
@@ -1992,7 +2008,8 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
unsigned int latency = 0;
if (i915->display.sagv.block_time_us)
- latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
+ latency = i915->display.sagv.block_time_us +
+ skl_wm_latency(i915, 0, wm_params);
skl_compute_plane_wm(crtc_state, plane, 0, latency,
wm_params, &levels[0],
@@ -2184,6 +2201,119 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
+static bool
+skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
+ int wm0_lines, int latency)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /* FIXME missing scaler and DSC pre-fill time */
+ return crtc_state->framestart_delay +
+ intel_usecs_to_scanlines(adjusted_mode, latency) +
+ wm0_lines >
+ adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
+}
+
+static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+ int wm0_lines = 0;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ /* FIXME what about !skl_wm_has_lines() platforms? */
+ wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines);
+ }
+
+ return wm0_lines;
+}
+
+static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
+ int wm0_lines)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ int level;
+
+ for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ int latency;
+
+ /* FIXME should we care about the latency w/a's? */
+ latency = skl_wm_latency(i915, level, NULL);
+ if (latency == 0)
+ continue;
+
+ /* FIXME is it correct to use 0 latency for wm0 here? */
+ if (level == 0)
+ latency = 0;
+
+ if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency))
+ return level;
+ }
+
+ return -EINVAL;
+}
+
+static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ int wm0_lines, level;
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ wm0_lines = skl_max_wm0_lines(crtc_state);
+
+ level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines);
+ if (level < 0)
+ return level;
+
+ /*
+ * FIXME PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
+ * based on whether we're limited by the vblank duration.
+ *
+ * FIXME also related to skl+ w/a 1136 (also unimplemented as of
+ * now) perhaps?
+ */
+
+ for (level++; level < i915->display.wm.num_levels; level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ /*
+ * FIXME just clear enable or flag the entire
+ * thing as bad via min_ddb_alloc=U16_MAX?
+ */
+ wm->wm[level].enable = false;
+ wm->uv_wm[level].enable = false;
+ }
+ }
+
+ if (DISPLAY_VER(i915) >= 12 &&
+ i915->display.sagv.block_time_us &&
+ skl_is_vblank_too_short(crtc_state, wm0_lines,
+ i915->display.sagv.block_time_us)) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ wm->sagv.wm0.enable = false;
+ wm->sagv.trans_wm.enable = false;
+ }
+ }
+
+ return 0;
+}
+
static int skl_build_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -2213,7 +2343,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
- return 0;
+ return skl_wm_check_vblank(crtc_state);
}
static void skl_ddb_entry_write(struct drm_i915_private *i915,
@@ -2248,7 +2378,6 @@ void skl_write_plane_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -2256,8 +2385,9 @@ void skl_write_plane_wm(struct intel_plane *plane,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ int level;
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < i915->display.wm.num_levels; level++)
skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
skl_plane_wm_level(pipe_wm, plane_id, level));
@@ -2285,14 +2415,14 @@ void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
+ int level;
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < i915->display.wm.num_levels; level++)
skl_write_wm_level(i915, CUR_WM(pipe, level),
skl_plane_wm_level(pipe_wm, plane_id, level));
@@ -2324,9 +2454,9 @@ static bool skl_plane_wm_equals(struct drm_i915_private *i915,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
- int level, max_level = ilk_wm_max_level(i915);
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2398,6 +2528,8 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane_id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
}
return 0;
@@ -2674,9 +2806,9 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
const struct skl_pipe_wm *new_pipe_wm)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
+ int level;
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2755,6 +2887,8 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane_id);
+ new_crtc_state->async_flip_planes = 0;
+ new_crtc_state->do_async_flip = false;
}
return 0;
@@ -2810,16 +2944,14 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- int level, max_level;
enum plane_id plane_id;
+ int level;
u32 val;
- max_level = ilk_wm_max_level(i915);
-
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
if (plane_id != PLANE_CURSOR)
val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
else
@@ -2856,7 +2988,7 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
}
}
-void skl_wm_get_hw_state(struct drm_i915_private *i915)
+static void skl_wm_get_hw_state(struct drm_i915_private *i915)
{
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->display.dbuf.obj.state);
@@ -2956,7 +3088,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
return false;
}
-void skl_wm_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct drm_i915_private *i915)
{
struct intel_crtc *crtc;
@@ -2992,6 +3124,12 @@ void skl_wm_sanitize(struct drm_i915_private *i915)
}
}
+static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ skl_wm_get_hw_state(i915);
+ skl_wm_sanitize(i915);
+}
+
void intel_wm_state_verify(struct intel_crtc *crtc,
struct intel_crtc_state *new_crtc_state)
{
@@ -3002,9 +3140,9 @@ void intel_wm_state_verify(struct intel_crtc *crtc,
struct skl_pipe_wm wm;
} *hw;
const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- int level, max_level = ilk_wm_max_level(i915);
struct intel_plane *plane;
u8 hw_enabled_slices;
+ int level;
if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
return;
@@ -3031,7 +3169,7 @@ void intel_wm_state_verify(struct intel_crtc *crtc,
const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
- for (level = 0; level <= max_level; level++) {
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
hw_wm_level = &hw->wm.planes[plane->id].wm[level];
sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
@@ -3153,7 +3291,7 @@ void skl_watermark_ipc_init(struct drm_i915_private *i915)
static void
adjust_wm_latency(struct drm_i915_private *i915,
- u16 wm[], int max_level, int read_latency)
+ u16 wm[], int num_levels, int read_latency)
{
bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
int i, level;
@@ -3163,12 +3301,12 @@ adjust_wm_latency(struct drm_i915_private *i915,
* need to be disabled. We make sure to sanitize the values out
* of the punit to satisfy this requirement.
*/
- for (level = 1; level <= max_level; level++) {
+ for (level = 1; level < num_levels; level++) {
if (wm[level] == 0) {
- for (i = level + 1; i <= max_level; i++)
+ for (i = level + 1; i < num_levels; i++)
wm[i] = 0;
- max_level = level - 1;
+ num_levels = level;
break;
}
}
@@ -3181,7 +3319,7 @@ adjust_wm_latency(struct drm_i915_private *i915,
* from the punit when level 0 response data is 0us.
*/
if (wm[0] == 0) {
- for (level = 0; level <= max_level; level++)
+ for (level = 0; level < num_levels; level++)
wm[level] += read_latency;
}
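Concretely (hypothetical punit values): latencies {2, 4, 0, 12, ...} are sanitized to {2, 4, 0, 0, ...} with num_levels clamped to 2, because a zero latency mid-table must disable that level and everything above it; and if wm[0] itself reads back as 0, read_latency is added to every remaining level instead.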
@@ -3197,7 +3335,7 @@ adjust_wm_latency(struct drm_i915_private *i915,
static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- int max_level = ilk_wm_max_level(i915);
+ int num_levels = i915->display.wm.num_levels;
u32 val;
val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
@@ -3212,12 +3350,12 @@ static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- adjust_wm_latency(i915, wm, max_level, 6);
+ adjust_wm_latency(i915, wm, num_levels, 6);
}
static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- int max_level = ilk_wm_max_level(i915);
+ int num_levels = i915->display.wm.num_levels;
int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
int mult = IS_DG2(i915) ? 2 : 1;
u32 val;
@@ -3249,11 +3387,16 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
- adjust_wm_latency(i915, wm, max_level, read_latency);
+ adjust_wm_latency(i915, wm, num_levels, read_latency);
}
static void skl_setup_wm_latency(struct drm_i915_private *i915)
{
+ if (HAS_HW_SAGV_WM(i915))
+ i915->display.wm.num_levels = 6;
+ else
+ i915->display.wm.num_levels = 8;
+
if (DISPLAY_VER(i915) >= 14)
mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
else
@@ -3264,6 +3407,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *i915)
static const struct intel_wm_funcs skl_wm_funcs = {
.compute_global_watermarks = skl_compute_wm,
+ .get_hw_state = skl_wm_get_hw_state_and_sanitize,
};
void skl_wm_init(struct drm_i915_private *i915)
@@ -3541,13 +3685,34 @@ static const struct file_operations skl_watermark_ipc_status_fops = {
.write = skl_watermark_ipc_status_write
};
-void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
+static int intel_sagv_status_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ static const char * const sagv_status[] = {
+ [I915_SAGV_UNKNOWN] = "unknown",
+ [I915_SAGV_DISABLED] = "disabled",
+ [I915_SAGV_ENABLED] = "enabled",
+ [I915_SAGV_NOT_CONTROLLED] = "not controlled",
+ };
+
+ seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
+ seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
+ seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
+
+void skl_watermark_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
- if (!HAS_IPC(i915))
- return;
+ if (HAS_IPC(i915))
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
+ &skl_watermark_ipc_status_fops);
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
- &skl_watermark_ipc_status_fops);
+ if (HAS_SAGV(i915))
+ debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
+ &intel_sagv_status_fops);
}
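
For reference, the DEFINE_SHOW_ATTRIBUTE(intel_sagv_status) line above generates the open handler and file_operations for the new debugfs node. A simplified sketch of what the seq_file helper macro expands to (not its verbatim kernel definition):

	/* Simplified expansion of DEFINE_SHOW_ATTRIBUTE(intel_sagv_status). */
	static int intel_sagv_status_open(struct inode *inode, struct file *file)
	{
		/* single_open() passes inode->i_private through as m->private */
		return single_open(file, intel_sagv_status_show, inode->i_private);
	}

	static const struct file_operations intel_sagv_status_fops = {
		.owner   = THIS_MODULE,
		.open    = intel_sagv_status_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

This is why intel_sagv_status_show() can pull the i915 pointer out of m->private: debugfs_create_file() stored it as the inode's private data.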
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 37954c472070..f91a3d4ddc07 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -10,7 +10,7 @@
#include "intel_display_limits.h"
#include "intel_global_state.h"
-#include "intel_pm_types.h"
+#include "intel_wm_types.h"
struct drm_i915_private;
struct intel_atomic_state;
@@ -38,16 +38,13 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx);
-void skl_wm_get_hw_state(struct drm_i915_private *i915);
-void skl_wm_sanitize(struct drm_i915_private *i915);
-
void intel_wm_state_verify(struct intel_crtc *crtc,
struct intel_crtc_state *new_crtc_state);
void skl_watermark_ipc_init(struct drm_i915_private *i915);
void skl_watermark_ipc_update(struct drm_i915_private *i915);
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
-void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915);
+void skl_watermark_debugfs_register(struct drm_i915_private *i915);
void skl_wm_init(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 2289f6b1b4eb..028965ab442d 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -331,32 +331,23 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
bool cold_boot = false;
/* Set the MIPI mode
* If MIPI_Mode is off, then writes to the LP_Wake bit are not reflected.
* Power ON MIPI IO first and then write into IO reset and LP wake bits
*/
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- intel_de_write(dev_priv, MIPI_CTRL(port),
- tmp | GLK_MIPIIO_ENABLE);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE);
/* Put the IO into reset */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
- tmp &= ~GLK_LP_WAKE;
- else
- tmp |= GLK_LP_WAKE;
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0);
}
/* Wait for Pwr ACK */
@@ -380,7 +371,6 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -390,24 +380,18 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
}
/* Get IO out of reset */
- val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
- val | GLK_MIPIIO_RESET_RELEASED);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of low power state */
for_each_dsi_port(port, intel_dsi->ports) {
if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= DEVICE_READY;
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, DEVICE_READY);
usleep_range(10, 15);
} else {
/* Enter ULPS */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_ENTER | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for ULPS active */
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
@@ -415,20 +399,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
drm_err(&dev_priv->drm, "ULPS not active\n");
/* Exit ULPS */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_EXIT | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY);
/* Enter Normal Mode */
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
-
- val = intel_de_read(dev_priv, MIPI_CTRL(port));
- val &= ~GLK_LP_WAKE;
- intel_de_write(dev_priv, MIPI_CTRL(port), val);
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK,
+ ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
+
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0);
}
}
@@ -460,9 +439,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
- intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port),
- val | LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
@@ -482,7 +459,6 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -505,9 +481,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
* Common bit for both MIPI Port A & MIPI Port C
* No similar bit in MIPI Port C reg
*/
- val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A));
- intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A),
- val | LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
@@ -537,15 +511,11 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
/* Enter ULPS */
- for_each_dsi_port(port, intel_dsi->ports) {
- val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
- val &= ~ULPS_STATE_MASK;
- val |= (ULPS_STATE_ENTER | DEVICE_READY);
- intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -568,12 +538,9 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 tmp;
/* Put the IO into reset */
- tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
- tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -583,11 +550,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
}
/* Clear MIPI mode */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- tmp &= ~GLK_MIPIIO_ENABLE;
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0);
}
static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -607,7 +571,6 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
- u32 val;
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
DEVICE_READY | ULPS_STATE_ENTER);
@@ -631,8 +594,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
drm_err(&dev_priv->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
- val = intel_de_read(dev_priv, port_ctrl);
- intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD);
+ intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0);
usleep_range(1000, 1500);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
@@ -649,23 +611,17 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
enum port port;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
- u32 temp;
+ u32 temp = intel_dsi->pixel_overlap;
+
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- for_each_dsi_port(port, intel_dsi->ports) {
- temp = intel_de_read(dev_priv,
- MIPI_CTRL(port));
- temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
- intel_dsi->pixel_overlap <<
- BXT_PIXEL_OVERLAP_CNT_SHIFT;
- intel_de_write(dev_priv, MIPI_CTRL(port),
- temp);
- }
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ BXT_PIXEL_OVERLAP_CNT_MASK,
+ temp << BXT_PIXEL_OVERLAP_CNT_SHIFT);
} else {
- temp = intel_de_read(dev_priv, VLV_CHICKEN_3);
- temp &= ~PIXEL_OVERLAP_CNT_MASK |
- intel_dsi->pixel_overlap <<
- PIXEL_OVERLAP_CNT_SHIFT;
- intel_de_write(dev_priv, VLV_CHICKEN_3, temp);
+ intel_de_rmw(dev_priv, VLV_CHICKEN_3,
+ PIXEL_OVERLAP_CNT_MASK,
+ temp << PIXEL_OVERLAP_CNT_SHIFT);
}
}
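
Worth noting: the removed lines in this hunk carried an operator-precedence bug that the rmw conversion quietly fixes. A sketch of how the old expression parsed:

	/* Old code:  temp &= ~MASK | overlap << SHIFT;
	 * parses as: temp &= (~MASK | (overlap << SHIFT));
	 * i.e. it masks temp but never ORs the new field value in.
	 * The replacement states the intent directly:
	 *   intel_de_rmw(dev_priv, reg, MASK, overlap << SHIFT);
	 */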
@@ -709,11 +665,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- u32 temp;
/* de-assert ip_tg_enable signal */
- temp = intel_de_read(dev_priv, port_ctrl);
- intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE);
+ intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);
intel_de_posting_read(dev_priv, port_ctrl);
}
}
@@ -787,7 +741,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
- u32 val;
bool glk_cold_boot = false;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -810,9 +763,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
- val | MIPIO_RST_CTRL);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
/* Power up DSI regulator */
intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
@@ -820,12 +771,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- u32 val;
-
/* Disable DPOunit clock gating, can stall pipe */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val |= DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ 0, DPOUNIT_CLOCK_GATE_DISABLE);
}
if (!IS_GEMINILAKE(dev_priv))
@@ -949,7 +897,6 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -987,21 +934,16 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
- val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
- intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
- val & ~MIPIO_RST_CTRL);
+ intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
bxt_dsi_pll_disable(encoder);
} else {
- u32 val;
-
vlv_dsi_pll_disable(encoder);
- val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
- val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
/* Assert reset */
@@ -1058,7 +1000,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
*/
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
- enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
@@ -1130,7 +1072,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
- pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+ pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc);
/* Enable frame timestamp based scanline reporting */
pipe_config->mode_flags |=
@@ -1432,11 +1374,8 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
enum pipe pipe = crtc->pipe;
- tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
- tmp &= ~BXT_PIPE_SELECT_MASK;
-
- tmp |= BXT_PIPE_SELECT(pipe);
- intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ intel_de_rmw(dev_priv, MIPI_CTRL(port),
+ BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe));
}
/* XXX: why here, why like this? handling in irq handler?! */
@@ -1605,7 +1544,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
if (IS_GEMINILAKE(dev_priv))
return;
@@ -1620,9 +1558,7 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
vlv_dsi_reset_clocks(encoder, port);
intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
- val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port));
- val &= ~VID_MODE_FORMAT_MASK;
- intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
}
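
All of the conversions in this file lean on one helper contract: intel_de_rmw(i915, reg, clear, set) performs a read-modify-write, clearing the `clear` bits and then setting the `set` bits. A sketch of the equivalent open-coded sequence (the real helper also returns the old register value):

	/* Sketch: what intel_de_rmw(dev_priv, reg, clear, set) boils down to. */
	u32 old = intel_de_read(dev_priv, reg);

	intel_de_write(dev_priv, reg, (old & ~clear) | set);

Passing 0 as `clear` makes the call a pure bit-set, and passing 0 as `set` a pure bit-clear, which is exactly how the hunks above use it.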
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index af7402127cd9..b697badbbe71 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -302,13 +302,10 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
- val &= ~BXT_DSI_PLL_DO_ENABLE;
- intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+ intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
/*
* PLL lock should deassert within 200us.
@@ -542,7 +539,6 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- u32 val;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -559,9 +555,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
}
/* Enable DSI PLL */
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
- val |= BXT_DSI_PLL_DO_ENABLE;
- intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+ intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
@@ -589,13 +583,9 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
} else {
- tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1);
- tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp);
+ intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
- tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2);
- tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp);
+ intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
}
intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 90a967374b1a..d8e06e783e30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -909,7 +909,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
- dsm_size = lmem_size - dsm_base;
+ dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
}
io_size = dsm_size;
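
ALIGN_DOWN() rounds down to the previous multiple of a power-of-two boundary, so the fix simply discards any sub-1MiB tail of the data stolen region. A worked example:

	/* ALIGN_DOWN(x, SZ_1M) clears the low 20 bits (SZ_1M == 0x100000). */
	u64 size = ALIGN_DOWN(0x10180000ull, SZ_1M);	/* -> 0x10100000 (257 MiB) */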
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 7420276827a5..341b94672abc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -472,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
struct ttm_placement place = {};
int ret;
- if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+ if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
return 0;
GEM_BUG_ON(!i915_tt->is_shmem);
@@ -511,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
+ /*
+ * This can get called twice by ttm; as long as we have a ttm resource
+ * or ttm_tt it is still safe to call. Due to pipeline-gutting,
+ * bo->resource may be NULL, but in that case we should always have a
+ * ttm_tt alive (e.g. if the pages are swapped out).
+ */
+ if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
__i915_gem_object_pages_fini(obj);
i915_ttm_free_cached_io_rsgt(obj);
}
@@ -1067,11 +1073,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
.interruptible = true,
.no_wait_gpu = true, /* should be idle already */
};
+ int err;
GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
- ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
- if (ret) {
+ err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+ if (err) {
dma_resv_unlock(bo->base.resv);
return VM_FAULT_SIGBUS;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 2a94a99ef76b..f8f6bed1b297 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -98,7 +98,7 @@ static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem)
static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
{
/* Once / if we support GGTT, this is also false for cached ttm_tts */
- return mem->mem_type != I915_PL_SYSTEM;
+ return mem && mem->mem_type != I915_PL_SYSTEM;
}
bool i915_ttm_resource_mappable(struct ttm_resource *res);
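
Folding the NULL check into the predicate lets callers such as i915_ttm_shrink() and i915_ttm_backup() above pass bo->resource unconditionally, even after pipeline-gutting has freed it: a gutted BO now simply reads as "not iomem". The calling pattern this enables, as a sketch:

	/* bo->resource may be NULL after pipeline-gutting; the check is safe. */
	if (!i915_ttm_cpu_maps_iomem(bo->resource))
		return 0;	/* system memory or no backing store: nothing to do */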
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 76dd9e5e1a8b..d030182ca176 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -711,6 +711,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
assert_object_held(dst);
assert_object_held(src);
+
+ if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
+ return -EINVAL;
+
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 7e67742bc65e..dfe39c8e74d8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
unsigned int flags;
int err = 0;
- if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
+ if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
return 0;
if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
@@ -187,7 +187,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
return err;
/* Content may have been swapped. */
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ if (!backup_bo->resource)
+ err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
+ if (!err)
+ err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
if (!err) {
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);
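
Isolated from the hunk context, the new restore ordering is: if pipeline-gutting left the backup BO without a resource, first re-validate it into system memory, and only then populate its ttm_tt. A sketch of that control flow (assuming the same ctx/placement values as above):

	/* Sketch of the fallback ordering in i915_ttm_restore(). */
	int err = 0;

	if (!backup_bo->resource)	/* pipeline-gutted: no placement yet */
		err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
	if (!err)			/* placement exists; pages may still need swap-in */
		err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);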
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 3bb1f7f0110e..ff81af4c8202 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -108,31 +108,30 @@ struct tiled_blits {
u32 height;
};
-static bool supports_x_tiling(const struct drm_i915_private *i915)
+static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
{
int gen = GRAPHICS_VER(i915);
+ /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
+ drm_WARN_ON(&i915->drm, gen < 9);
+
if (gen < 12)
return true;
- if (!HAS_LMEM(i915) || IS_DG1(i915))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
return false;
- return true;
+ return HAS_DISPLAY(i915);
}
static bool fast_blit_ok(const struct blit_buffer *buf)
{
- int gen = GRAPHICS_VER(buf->vma->vm->i915);
-
- if (gen < 9)
+ /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
+ if (GRAPHICS_VER(buf->vma->vm->i915) < 9)
return false;
- if (gen < 12)
- return true;
-
/* filter out platforms without X-tile support in fastblit */
- if (buf->tiling == CLIENT_TILING_X && !supports_x_tiling(buf->vma->vm->i915))
+ if (buf->tiling == CLIENT_TILING_X && !fastblit_supports_x_tiling(buf->vma->vm->i915))
return false;
return true;
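
The GRAPHICS_VER_FULL()/IP_VER() comparisons above operate on a packed version number, so plain integer ordering works across major.release steps. A sketch of the macro's effect (matching our reading of the i915 definition):

	/* IP_VER(12, 50) packs version 12.50 as (12 << 8) | 50, so
	 * IP_VER(12, 10) < IP_VER(12, 50) < IP_VER(13, 0) as integers. */
	#define IP_VER(ver, rel)	((ver) << 8 | (rel))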
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index d4e29da74612..ad3413242100 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
+#include "gt/intel_gt_print.h"
#include "gt/intel_gt_regs.h"
#include "i915_cmd_parser.h"
@@ -1143,12 +1144,130 @@ err_put:
return ret;
}
+static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
+{
+ static const union intel_engine_tlb_inv_reg gen8_regs[] = {
+ [RENDER_CLASS].reg = GEN8_RTCR,
+ [VIDEO_DECODE_CLASS].reg = GEN8_M1TCR, /* , GEN8_M2TCR */
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN8_VTCR,
+ [COPY_ENGINE_CLASS].reg = GEN8_BTCR,
+ };
+ static const union intel_engine_tlb_inv_reg gen12_regs[] = {
+ [RENDER_CLASS].reg = GEN12_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS].reg = GEN12_BLT_TLB_INV_CR,
+ [COMPUTE_CLASS].reg = GEN12_COMPCTX_TLB_INV_CR,
+ };
+ static const union intel_engine_tlb_inv_reg xehp_regs[] = {
+ [RENDER_CLASS].mcr_reg = XEHP_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS].mcr_reg = XEHP_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].mcr_reg = XEHP_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS].mcr_reg = XEHP_BLT_TLB_INV_CR,
+ [COMPUTE_CLASS].mcr_reg = XEHP_COMPCTX_TLB_INV_CR,
+ };
+ static const union intel_engine_tlb_inv_reg xelpmp_regs[] = {
+ [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
+ [OTHER_CLASS].reg = XELPMP_GSC_TLB_INV_CR,
+ };
+ struct drm_i915_private *i915 = engine->i915;
+ const unsigned int instance = engine->instance;
+ const unsigned int class = engine->class;
+ const union intel_engine_tlb_inv_reg *regs;
+ union intel_engine_tlb_inv_reg reg;
+ unsigned int num = 0;
+ u32 val;
+
+ /*
+ * New platforms should not be added with catch-all-newer (>=)
+ * condition so that any later platform added triggers the below warning
+ * and in turn mandates a human cross-check of whether the invalidation
+ * flows have compatible semantics.
+ *
+ * For instance with the 11.00 -> 12.00 transition three out of five
+ * respective engine registers were moved to masked type. Then after the
+ * 12.00 -> 12.50 transition multi cast handling is required too.
+ */
+
+ if (engine->gt->type == GT_MEDIA) {
+ if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
+ regs = xelpmp_regs;
+ num = ARRAY_SIZE(xelpmp_regs);
+ }
+ } else {
+ if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
+ regs = xehp_regs;
+ num = ARRAY_SIZE(xehp_regs);
+ } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
+ regs = gen12_regs;
+ num = ARRAY_SIZE(gen12_regs);
+ } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
+ regs = gen8_regs;
+ num = ARRAY_SIZE(gen8_regs);
+ } else if (GRAPHICS_VER(i915) < 8) {
+ return 0;
+ }
+ }
+
+ if (gt_WARN_ONCE(engine->gt, !num,
+ "Platform does not implement TLB invalidation!"))
+ return -ENODEV;
+
+ if (gt_WARN_ON_ONCE(engine->gt,
+ class >= num ||
+ (!regs[class].reg.reg &&
+ !regs[class].mcr_reg.reg)))
+ return -ERANGE;
+
+ reg = regs[class];
+
+ if (regs == xelpmp_regs && class == OTHER_CLASS) {
+ /*
+ * There's only a single GSC instance, but it uses register bit
+ * 1 instead of either 0 or OTHER_GSC_INSTANCE.
+ */
+ GEM_WARN_ON(instance != OTHER_GSC_INSTANCE);
+ val = 1;
+ } else if (regs == gen8_regs && class == VIDEO_DECODE_CLASS && instance == 1) {
+ reg.reg = GEN8_M2TCR;
+ val = 0;
+ } else {
+ val = instance;
+ }
+
+ val = BIT(val);
+
+ engine->tlb_inv.mcr = regs == xehp_regs;
+ engine->tlb_inv.reg = reg;
+ engine->tlb_inv.done = val;
+
+ if (GRAPHICS_VER(i915) >= 12 &&
+ (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS ||
+ engine->class == COMPUTE_CLASS ||
+ engine->class == OTHER_CLASS))
+ engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
+ else
+ engine->tlb_inv.request = val;
+
+ return 0;
+}
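
The request value computed above gets wrapped in _MASKED_BIT_ENABLE() because several gen12+ TLB registers are masked registers: the upper 16 bits of a write select which of the lower 16 data bits actually take effect. A sketch matching the i915 helper definitions:

	/* Masked-register write helpers (cf. i915_reg_defs.h). Writing
	 * _MASKED_BIT_ENABLE(BIT(n)) sets bit n without disturbing the
	 * other 15 data bits of the register. */
	#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))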
+
static int engine_setup_common(struct intel_engine_cs *engine)
{
int err;
init_llist_head(&engine->barrier_tasks);
+ err = intel_engine_init_tlb_invalidation(engine);
+ if (err)
+ return err;
+
err = init_status_page(engine);
if (err)
return err;
@@ -1939,13 +2058,13 @@ static const char *repr_timer(const struct timer_list *t)
static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
u64 addr;
- if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
+ if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(i915, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
- if (HAS_EXECLISTS(dev_priv)) {
+ if (HAS_EXECLISTS(i915)) {
drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
@@ -1966,7 +2085,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
- if (GRAPHICS_VER(dev_priv) >= 6) {
+ if (GRAPHICS_VER(i915) >= 6) {
drm_printf(m, "\tRING_IMR: 0x%08x\n",
ENGINE_READ(engine, RING_IMR));
drm_printf(m, "\tRING_ESR: 0x%08x\n",
@@ -1983,15 +2102,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (GRAPHICS_VER(dev_priv) >= 8)
+ if (GRAPHICS_VER(i915) >= 8)
addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
- else if (GRAPHICS_VER(dev_priv) >= 4)
+ else if (GRAPHICS_VER(i915) >= 4)
addr = ENGINE_READ(engine, RING_DMA_FADD);
else
addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
- if (GRAPHICS_VER(dev_priv) >= 4) {
+ if (GRAPHICS_VER(i915) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
@@ -2001,7 +2120,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (HAS_EXECLISTS(dev_priv) && !intel_engine_uses_guc(engine)) {
+ if (HAS_EXECLISTS(i915) && !intel_engine_uses_guc(engine)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2067,7 +2186,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
rcu_read_unlock();
i915_sched_engine_active_unlock_bh(engine->sched_engine);
- } else if (GRAPHICS_VER(dev_priv) > 6) {
+ } else if (GRAPHICS_VER(i915) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 4fd54fb8810f..0a071e5da1a8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -341,6 +341,18 @@ struct intel_engine_guc_stats {
u64 start_gt_clk;
};
+union intel_engine_tlb_inv_reg {
+ i915_reg_t reg;
+ i915_mcr_reg_t mcr_reg;
+};
+
+struct intel_engine_tlb_inv {
+ bool mcr;
+ union intel_engine_tlb_inv_reg reg;
+ u32 request;
+ u32 done;
+};
+
struct intel_engine_cs {
struct drm_i915_private *i915;
struct intel_gt *gt;
@@ -372,6 +384,8 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
+ struct intel_engine_tlb_inv tlb_inv;
+
/*
* Some w/a require forcewake to be held (which prevents RC6) while
* a particular engine is active. If so, we set fw_domain to which
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 2af1ae3831df..5d143e2a8db0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -394,6 +394,7 @@
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
+#define MI_DO_COMPARE REG_BIT(21)
#define STATE_BASE_ADDRESS \
((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
@@ -439,6 +440,8 @@
#define GSC_FW_LOAD GSC_INSTR(1, 0, 2)
#define HECI1_FW_LIMIT_VALID (1 << 31)
+#define GSC_HECI_CMD_PKT GSC_INSTR(0, 0, 6)
+
/*
* Used to convert any address to canonical form.
* Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
index fcac1775e9c3..7ab3ca0f9f26 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.h
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -33,7 +33,7 @@ struct intel_gsc {
} intf[INTEL_GSC_NUM_INTERFACES];
};
-void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *dev_priv);
+void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915);
void intel_gsc_fini(struct intel_gsc *gsc);
void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f0dbfc434e07..256cecf99dd1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -28,7 +28,6 @@
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pci_config.h"
-#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
@@ -785,6 +784,29 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
intel_gsc_fini(&gt->gsc);
/*
+ * If we unload the driver and wedge before the GSC worker is complete,
+ * the worker will hit an error on its submission to the GSC engine and
+ * then exit. This is hard to hit for a user, but it is reproducible
+ * when skipping selftests. The error is handled gracefully by the
+ * worker, so there are no functional issues, but we still end up with
+ * an error message in dmesg, which is something we want to avoid as
+ * this is a supported scenario. We could modify the worker to better
+ * handle a wedging occurring during its execution, but that gets
+ * complicated for a couple of reasons:
+ * - We do want the error on runtime wedging, because there are
+ * implications for subsystems outside of GT (e.g., PXP, HDCP); it's
+ * only the error on driver unload that we want to silence.
+ * - The worker is responsible for multiple submissions (GSC FW load,
+ * HuC auth, SW proxy), so all of those will have to be adapted to
+ * handle the wedged_on_fini scenario.
+ * Therefore, it's much simpler to just wait for the worker to be done
+ * before wedging on driver removal, also considering that the worker
+ * will likely already be idle in the great majority of non-selftest
+ * scenarios.
+ */
+ intel_gsc_uc_flush_work(&gt->uc.gsc);
+
+ /*
* Upon unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
* resources.
@@ -982,35 +1004,6 @@ void intel_gt_info_print(const struct intel_gt_info *info,
intel_sseu_dump(&info->sseu, p);
}
-struct reg_and_bit {
- union {
- i915_reg_t reg;
- i915_mcr_reg_t mcr_reg;
- };
- u32 bit;
-};
-
-static struct reg_and_bit
-get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
- const i915_reg_t *regs, const unsigned int num)
-{
- const unsigned int class = engine->class;
- struct reg_and_bit rb = { };
-
- if (gt_WARN_ON_ONCE(engine->gt, class >= num || !regs[class].reg))
- return rb;
-
- rb.reg = regs[class];
- if (gen8 && class == VIDEO_DECODE_CLASS)
- rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
- else
- rb.bit = engine->instance;
-
- rb.bit = BIT(rb.bit);
-
- return rb;
-}
-
/*
* HW architecture suggests a typical invalidation time of 40us,
* with pessimistic cases up to 100us and a recommendation to
@@ -1024,14 +1017,20 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
* but are now considered MCR registers. Since they exist within a GAM range,
* the primary instance of the register rolls up the status from each unit.
*/
-static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
+static int wait_for_invalidate(struct intel_engine_cs *engine)
{
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
- return intel_gt_mcr_wait_for_reg(gt, rb.mcr_reg, rb.bit, 0,
+ if (engine->tlb_inv.mcr)
+ return intel_gt_mcr_wait_for_reg(engine->gt,
+ engine->tlb_inv.reg.mcr_reg,
+ engine->tlb_inv.done,
+ 0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS);
else
- return __intel_wait_for_register_fw(gt->uncore, rb.reg, rb.bit, 0,
+ return __intel_wait_for_register_fw(engine->gt->uncore,
+ engine->tlb_inv.reg.reg,
+ engine->tlb_inv.done,
+ 0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS,
NULL);
@@ -1039,62 +1038,14 @@ static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
static void mmio_invalidate_full(struct intel_gt *gt)
{
- static const i915_reg_t gen8_regs[] = {
- [RENDER_CLASS] = GEN8_RTCR,
- [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
- [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
- [COPY_ENGINE_CLASS] = GEN8_BTCR,
- };
- static const i915_reg_t gen12_regs[] = {
- [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR,
- [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
- [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
- [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
- [COMPUTE_CLASS] = GEN12_COMPCTX_TLB_INV_CR,
- };
- static const i915_mcr_reg_t xehp_regs[] = {
- [RENDER_CLASS] = XEHP_GFX_TLB_INV_CR,
- [VIDEO_DECODE_CLASS] = XEHP_VD_TLB_INV_CR,
- [VIDEO_ENHANCEMENT_CLASS] = XEHP_VE_TLB_INV_CR,
- [COPY_ENGINE_CLASS] = XEHP_BLT_TLB_INV_CR,
- [COMPUTE_CLASS] = XEHP_COMPCTX_TLB_INV_CR,
- };
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
- const i915_reg_t *regs;
- unsigned int num = 0;
unsigned long flags;
- /*
- * New platforms should not be added with catch-all-newer (>=)
- * condition so that any later platform added triggers the below warning
- * and in turn mandates a human cross-check of whether the invalidation
- * flows have compatible semantics.
- *
- * For instance with the 11.00 -> 12.00 transition three out of five
- * respective engine registers were moved to masked type. Then after the
- * 12.00 -> 12.50 transition multi cast handling is required too.
- */
-
- if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
- GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
- regs = NULL;
- num = ARRAY_SIZE(xehp_regs);
- } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
- GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
- regs = gen12_regs;
- num = ARRAY_SIZE(gen12_regs);
- } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
- regs = gen8_regs;
- num = ARRAY_SIZE(gen8_regs);
- } else if (GRAPHICS_VER(i915) < 8) {
- return;
- }
-
- if (gt_WARN_ONCE(gt, !num, "Platform does not implement TLB invalidation!"))
+ if (GRAPHICS_VER(i915) < 8)
return;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
@@ -1104,33 +1055,18 @@ static void mmio_invalidate_full(struct intel_gt *gt)
awake = 0;
for_each_engine(engine, gt, id) {
- struct reg_and_bit rb;
-
if (!intel_engine_pm_is_awake(engine))
continue;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- u32 val = BIT(engine->instance);
-
- if (engine->class == VIDEO_DECODE_CLASS ||
- engine->class == VIDEO_ENHANCEMENT_CLASS ||
- engine->class == COMPUTE_CLASS)
- val = _MASKED_BIT_ENABLE(val);
+ if (engine->tlb_inv.mcr)
intel_gt_mcr_multicast_write_fw(gt,
- xehp_regs[engine->class],
- val);
- } else {
- rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- if (!i915_mmio_reg_offset(rb.reg))
- continue;
-
- if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
- engine->class == VIDEO_ENHANCEMENT_CLASS ||
- engine->class == COMPUTE_CLASS))
- rb.bit = _MASKED_BIT_ENABLE(rb.bit);
-
- intel_uncore_write_fw(uncore, rb.reg, rb.bit);
- }
+ engine->tlb_inv.reg.mcr_reg,
+ engine->tlb_inv.request);
+ else
+ intel_uncore_write_fw(uncore,
+ engine->tlb_inv.reg.reg,
+ engine->tlb_inv.request);
+
awake |= engine->mask;
}
@@ -1149,17 +1085,9 @@ static void mmio_invalidate_full(struct intel_gt *gt)
intel_gt_mcr_unlock(gt, flags);
for_each_engine_masked(engine, gt, awake, tmp) {
- struct reg_and_bit rb;
-
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- rb.mcr_reg = xehp_regs[engine->class];
- rb.bit = BIT(engine->instance);
- } else {
- rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
- }
-
- if (wait_for_invalidate(gt, rb))
- gt_err_ratelimited(gt, "%s TLB invalidation did not complete in %ums!\n",
+ if (wait_for_invalidate(engine))
+ gt_err_ratelimited(gt,
+ "%s TLB invalidation did not complete in %ums!\n",
engine->name, TLB_INVAL_TIMEOUT_MS);
}
@@ -1205,3 +1133,7 @@ unlock:
mutex_unlock(&gt->tlb.invalidate_lock);
}
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_tlb.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index 3bb1c701d5ff..0b414eae1683 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -35,7 +35,7 @@
* ignored.
*/
-#define HAS_MSLICE_STEERING(dev_priv) (INTEL_INFO(dev_priv)->has_mslice_steering)
+#define HAS_MSLICE_STEERING(i915) (INTEL_INFO(i915)->has_mslice_steering)
static const char * const intel_steering_types[] = {
"L3BANK",
@@ -364,6 +364,7 @@ static u32 rw_with_mcr_steering(struct intel_gt *gt,
* function call.
*/
void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
+ __acquires(&gt->mcr_lock)
{
unsigned long __flags;
int err = 0;
@@ -410,6 +411,7 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
* Context: Releases gt->mcr_lock
*/
void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
+ __releases(&gt->mcr_lock)
{
spin_unlock_irqrestore(&gt->mcr_lock, flags);
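
The __acquires()/__releases() annotations added here affect only static analysis (sparse's lock-context tracking); they compile to nothing in a normal build. Roughly, per the standard kernel definitions:

	/* Sketch of the sparse lock annotations (include/linux/compiler_types.h). */
	#ifdef __CHECKER__
	# define __acquires(x)	__attribute__((context(x, 0, 1)))
	# define __releases(x)	__attribute__((context(x, 1, 0)))
	#else
	# define __acquires(x)
	# define __releases(x)
	#endif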
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index cef3d6f5c34e..e02cb90723ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -17,35 +17,13 @@
#include "intel_gt_print.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
-#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
-#include "intel_pcode.h"
#include "pxp/intel_pxp_pm.h"
#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
-static void mtl_media_busy(struct intel_gt *gt)
-{
- /* Wa_14017073508: mtl */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
- PCODE_MBOX_GT_STATE_MEDIA_BUSY,
- PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
-static void mtl_media_idle(struct intel_gt *gt)
-{
- /* Wa_14017073508: mtl */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
- PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY,
- PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
@@ -93,9 +71,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
GT_TRACE(gt, "\n");
- /* Wa_14017073508: mtl */
- mtl_media_busy(gt);
-
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@@ -145,9 +120,6 @@ static int __gt_park(struct intel_wakeref *wf)
GEM_BUG_ON(!wakeref);
intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
- /* Wa_14017073508: mtl */
- mtl_media_idle(gt);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_print.h b/drivers/gpu/drm/i915/gt/intel_gt_print.h
index 5d9da355ce24..55a336a9ff06 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_print.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_print.h
@@ -28,6 +28,9 @@
#define gt_err_ratelimited(_gt, _fmt, ...) \
drm_err_ratelimited(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+#define gt_notice_ratelimited(_gt, _fmt, ...) \
+ dev_notice_ratelimited((_gt)->i915->drm.dev, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
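
A hypothetical call site for the new macro, to show the resulting output shape (the "GT%u: " prefix and the rate limiting come from the macro itself; the message and variable here are made up):

	/* Hypothetical usage: emits a rate-limited "... GT0: ..." notice. */
	gt_notice_ratelimited(gt, "reset skipped, status %#x\n", status);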
#define gt_probe_error(_gt, _fmt, ...) \
do { \
if (i915_error_injected()) \
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index be0f6e305c88..4aecb5a7b631 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -9,8 +9,6 @@
#include "i915_reg_defs.h"
#include "display/intel_display_reg_defs.h" /* VLV_DISPLAY_BASE */
-#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) })
-
/*
* The perf control registers are technically multicast registers, but the
* driver never needs to read/write them directly; we only use them to build
@@ -480,6 +478,9 @@
#define HDC_FORCE_NON_COHERENT (1 << 4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10)
+#define COMMON_SLICE_CHICKEN4 _MMIO(0x7300)
+#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
+
#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
@@ -769,9 +770,6 @@
#define GEN10_DFR_RATIO_EN_AND_CHICKEN MCR_REG(0x9550)
#define DFR_DISABLE (1 << 9)
-#define INF_UNIT_LEVEL_CLKGATE MCR_REG(0x9560)
-#define CGPSF_CLKGATE_DIS (1 << 3)
-
#define MICRO_BP0_0 _MMIO(0x9800)
#define MICRO_BP0_2 _MMIO(0x9804)
#define MICRO_BP0_1 _MMIO(0x9808)
@@ -1093,6 +1091,7 @@
#define XEHP_BLT_TLB_INV_CR MCR_REG(0xcee4)
#define GEN12_COMPCTX_TLB_INV_CR _MMIO(0xcf04)
#define XEHP_COMPCTX_TLB_INV_CR MCR_REG(0xcf04)
+#define XELPMP_GSC_TLB_INV_CR _MMIO(0xcf04) /* media GT only */
#define XEHP_MERT_MOD_CTRL MCR_REG(0xcf28)
#define RENDER_MOD_CTRL MCR_REG(0xcf2c)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
index 6629e4c72b6b..33cba406b569 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -72,7 +72,7 @@ static void kobj_gt_release(struct kobject *kobj)
{
}
-static struct kobj_type kobj_gt_type = {
+static const struct kobj_type kobj_gt_type = {
.release = kobj_gt_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = id_groups,
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 5c91622dfca4..f4150f61f39c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -486,6 +486,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
static bool rc6_supported(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_gt *gt = rc6_to_gt(rc6);
if (!HAS_RC6(i915))
return false;
@@ -502,6 +503,13 @@ static bool rc6_supported(struct intel_rc6 *rc6)
return false;
}
+ if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0) &&
+ gt->type == GT_MEDIA) {
+ drm_notice(&i915->drm,
+ "Media RC6 disabled on A step\n");
+ return false;
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index f3ad93db0b21..89fdfc67f8d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -158,7 +158,7 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = {
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
u64 *start, u32 *size)
{
- if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
+ if (!IS_DG1(uncore->i915))
return false;
*start = 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 9312b29f5a97..80351f0a856c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -51,7 +51,7 @@ struct intel_reset {
/**
* Waitqueue to signal when the reset has completed. Used by clients
- * that wait for dev_priv->mm.wedged to settle.
+ * that wait for i915->mm.wedged to settle.
*/
wait_queue_head_t queue;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 827adb0cfaea..3fd795c3263f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1052,9 +1052,9 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
static void ring_release(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
- drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
+ drm_WARN_ON(&i915->drm, GRAPHICS_VER(i915) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_engine_cleanup_common(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index f5d7b5126433..4d0dc9de23f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1677,7 +1677,6 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- u32 val;
vlv_iosf_sb_get(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1686,21 +1685,6 @@ static void vlv_rps_init(struct intel_rps *rps)
vlv_init_gpll_ref_freq(rps);
- val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
- switch ((val >> 6) & 3) {
- case 0:
- case 1:
- i915->mem_freq = 800;
- break;
- case 2:
- i915->mem_freq = 1066;
- break;
- case 3:
- i915->mem_freq = 1333;
- break;
- }
- drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
-
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -1727,7 +1711,6 @@ static void vlv_rps_init(struct intel_rps *rps)
static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- u32 val;
vlv_iosf_sb_get(i915,
BIT(VLV_IOSF_SB_PUNIT) |
@@ -1736,18 +1719,6 @@ static void chv_rps_init(struct intel_rps *rps)
vlv_init_gpll_ref_freq(rps);
- val = vlv_cck_read(i915, CCK_FUSE_REG);
-
- switch ((val >> 2) & 0x7) {
- case 3:
- i915->mem_freq = 2000;
- break;
- default:
- i915->mem_freq = 1600;
- break;
- }
- drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
-
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 9173ec75f2b8..6507fa3f6d1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -57,7 +57,7 @@ struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
- * dev_priv->irq_lock
+ * i915->irq_lock
*/
struct timer_list timer;
struct work_struct work;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index aa87d3832d60..d7e8c374f153 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -27,7 +27,7 @@ struct drm_printer;
* is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
* I915_MAX_SS_FUSE_BITS value below).
*/
-#define GEN_MAX_SS_PER_HSW_SLICE 6
+#define GEN_MAX_SS_PER_HSW_SLICE 8
/*
* Maximum number of 32-bit registers used by hardware to express the
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 485c5cc5d0f9..e7ee24bcad89 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -743,9 +743,13 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
FF_MODE2_GS_TIMER_224,
0, false);
- if (!IS_DG1(i915))
+ if (!IS_DG1(i915)) {
/* Wa_1806527549 */
wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);
+
+ /* Wa_1606376872 */
+ wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
+ }
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -1470,54 +1474,17 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- gen12_gt_workarounds_init(gt, wal);
-
- /* Wa_1409420604:tgl */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_mcr_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE2,
- CPSSUNIT_CLKGATE_DIS);
-
- /* Wa_1607087056:tgl also know as BUG:1409180338 */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal,
- GEN11_SLICE_UNIT_LEVEL_CLKGATE,
- L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
-
- /* Wa_1408615072:tgl[a0] */
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
-}
-
-static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = gt->i915;
-
gen12_gt_workarounds_init(gt, wal);
- /* Wa_1607087056:dg1 */
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
- wa_write_or(wal,
- GEN11_SLICE_UNIT_LEVEL_CLKGATE,
- L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
-
/* Wa_1409420604:dg1 */
- if (IS_DG1(i915))
- wa_mcr_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE2,
- CPSSUNIT_CLKGATE_DIS);
+ wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
+ CPSSUNIT_CLKGATE_DIS);
/* Wa_1408615072:dg1 */
/* Empirical testing shows this register is unaffected by engine reset. */
- if (IS_DG1(i915))
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- VSUNIT_CLKGATE_DIS_TGL);
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
}
static void
@@ -1530,6 +1497,12 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_1409757795:xehpsdv */
wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
+ /* Wa_18011725039:xehpsdv */
+ if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
+ wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
+ wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
+ }
+
/* Wa_16011155590:xehpsdv */
if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
@@ -1579,6 +1552,9 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14014368820:xehpsdv */
wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14010670810:xehpsdv */
+ wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
@@ -1681,13 +1657,6 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14014830051:dg2 */
wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
- /*
- * The following are not actually "workarounds" but rather
- * recommended tuning settings documented in the bspec's
- * performance guide section.
- */
- wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
-
/* Wa_14015795083 */
wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
@@ -1700,6 +1669,9 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_1509235366:dg2 */
wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14010648519:dg2 */
+ wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
@@ -1715,6 +1687,9 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_16016694945 */
+ wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}
static void
@@ -1755,11 +1730,38 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
debug_dump_steering(gt);
}
+/*
+ * The bspec performance guide has recommended MMIO tuning settings. These
+ * aren't truly "workarounds" but we want to program them through the
+ * workaround infrastructure to make sure they're (re)applied at the proper
+ * times.
+ *
+ * The programming in this function is for settings that persist through
+ * engine resets and also are not part of any engine's register state context.
+ * I.e., settings that only need to be re-applied in the event of a full GT
+ * reset.
+ */
+static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ if (IS_PONTEVECCHIO(gt->i915)) {
+ wa_mcr_write(wal, XEHPC_L3SCRUB,
+ SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
+ }
+
+ if (IS_DG2(gt->i915)) {
+ wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
+ }
+}
+
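For readers who haven't met the wa_* helpers: a "write_or" entry records a read-modify-write that ORs bits into a register and, because it sits on the GT workaround list, is re-applied on a full GT reset — exactly the persistence the comment above relies on. A minimal sketch of that semantic under hypothetical MMIO accessor names (the real application path lives in the workaround infrastructure, not here):

#include <stdint.h>

/* Hypothetical MMIO accessors standing in for the driver's uncore helpers. */
extern uint32_t mmio_read32(uint32_t offset);
extern void mmio_write32(uint32_t offset, uint32_t value);

/* OR the requested bits into the current value, leaving the rest untouched. */
static void wa_write_or_sketch(uint32_t offset, uint32_t set)
{
	mmio_write32(offset, mmio_read32(offset) | set);
}
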
static void
gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
+ gt_tuning_settings(gt, wal);
+
if (gt->type == GT_MEDIA) {
if (MEDIA_VER(i915) >= 13)
xelpmp_gt_workarounds_init(gt, wal);
@@ -1779,8 +1781,6 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
dg1_gt_workarounds_init(gt, wal);
- else if (IS_TIGERLAKE(i915))
- tgl_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 12)
gen12_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 11)
@@ -2187,37 +2187,20 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
/* Wa_1806527549:tgl */
whitelist_reg(w, HIZ_CHICKEN);
+
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
+
break;
default:
break;
}
}
-static void dg1_whitelist_build(struct intel_engine_cs *engine)
-{
- struct i915_wa_list *w = &engine->whitelist;
-
- tgl_whitelist_build(engine);
-
- /* GEN:BUG:1409280441:dg1 */
- if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) &&
- (engine->class == RENDER_CLASS ||
- engine->class == COPY_ENGINE_CLASS))
- whitelist_reg_ext(w, RING_ID(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
-}
-
-static void xehpsdv_whitelist_build(struct intel_engine_cs *engine)
-{
- allow_read_ctx_timestamp(engine);
-}
-
static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
- allow_read_ctx_timestamp(engine);
-
switch (engine->class) {
case RENDER_CLASS:
/*
@@ -2234,6 +2217,9 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
+
break;
case COMPUTE_CLASS:
/* Wa_16011157294:dg2_g10 */
@@ -2265,12 +2251,25 @@ static void blacklist_trtt(struct intel_engine_cs *engine)
static void pvc_whitelist_build(struct intel_engine_cs *engine)
{
- allow_read_ctx_timestamp(engine);
-
/* Wa_16014440446:pvc */
blacklist_trtt(engine);
}
+static void mtl_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
+
+ break;
+ default:
+ break;
+ }
+}
+
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -2279,15 +2278,13 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
wa_init_start(w, engine->gt, "whitelist", engine->name);
if (IS_METEORLAKE(i915))
- ; /* noop; none at this time */
+ mtl_whitelist_build(engine);
else if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine);
else if (IS_DG2(i915))
dg2_whitelist_build(engine);
else if (IS_XEHPSDV(i915))
- xehpsdv_whitelist_build(engine);
- else if (IS_DG1(i915))
- dg1_whitelist_build(engine);
+ ; /* none needed */
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 11)
@@ -2452,16 +2449,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
/* Wa_22010430635:dg2 */
wa_mcr_masked_en(wal,
GEN9_ROW_CHICKEN4,
GEN12_DISABLE_GRF_CLEAR);
- /* Wa_14010648519:dg2 */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
- }
-
/* Wa_14013202645:dg2 */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
@@ -2482,27 +2475,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
true);
}
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
- IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
- /*
- * Wa_1607138336:tgl[a0],dg1[a0]
- * Wa_1607063988:tgl[a0],dg1[a0]
- */
- wa_write_or(wal,
- GEN9_CTX_PREEMPT_REG,
- GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
- }
-
- if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) {
- /*
- * Wa_1606679103:tgl
- * (see also Wa_1606682166:icl)
- */
- wa_write_or(wal,
- GEN7_SARCHKMD,
- GEN7_DISABLE_SAMPLER_PREFETCH);
- }
-
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
@@ -2532,30 +2504,22 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
- IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
- /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */
+ /* Wa_1409804808 */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
- /*
- * Wa_1409085225:tgl
- * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p
- */
+ /* Wa_14010229206 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
}
- if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
- IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
+ if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
/*
- * Wa_1607030317:tgl
- * Wa_1607186500:tgl
- * Wa_1607297627:tgl,rkl,dg1[a0],adlp
+ * Wa_1607297627
*
* On TGL and RKL there are multiple entries for this WA in the
* BSpec; some indicate this is an A0-only WA, others indicate
* it applies to all steppings so we trust the "all steppings."
- * For DG1 this only applies to A0.
*/
wa_masked_en(wal,
RING_PSMI_CTL(RENDER_RING_BASE),
@@ -2975,16 +2939,8 @@ static void
add_render_compute_tuning_settings(struct drm_i915_private *i915,
struct i915_wa_list *wal)
{
- if (IS_PONTEVECCHIO(i915)) {
- wa_mcr_write(wal, XEHPC_L3SCRUB,
- SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
- }
-
- if (IS_DG2(i915)) {
- wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ if (IS_DG2(i915))
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
- }
/*
* This tuning setting proves beneficial only on ATS-M designs; the
@@ -3066,11 +3022,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
0, false);
}
- if (IS_PONTEVECCHIO(i915)) {
- /* Wa_16016694945 */
- wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
- }
-
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_mcr_masked_en(wal,
@@ -3082,18 +3033,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
GEN9_ROW_CHICKEN4,
GEN12_DISABLE_GRF_CLEAR);
- /* Wa_14010670810:xehpsdv */
- wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
-
/* Wa_14010449647:xehpsdv */
wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
-
- /* Wa_18011725039:xehpsdv */
- if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
- wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
- wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
- }
}
if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index b46425aeb2f0..0971241707ce 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -63,8 +63,8 @@ static void measure_clocks(struct intel_engine_cs *engine,
udelay(1000);
- dt[i] = ktime_sub(ktime_get(), dt[i]);
cycles[i] += read_timestamp(engine);
+ dt[i] = ktime_sub(ktime_get(), dt[i]);
local_irq_enable();
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index cfd736d88939..779fadcec7c4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -3,7 +3,6 @@
* Copyright © 2019 Intel Corporation
*/
-#include "intel_pm.h" /* intel_gpu_freq() */
#include "selftest_llc.h"
#include "intel_rps.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 6755bbc4ebda..84e77e8dbba1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -299,13 +299,13 @@ int live_rps_clock_interval(void *arg)
for (i = 0; i < 5; i++) {
preempt_disable();
- dt_[i] = ktime_get();
cycles_[i] = -intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+ dt_[i] = ktime_get();
udelay(1000);
- dt_[i] = ktime_sub(ktime_get(), dt_[i]);
cycles_[i] += intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI);
+ dt_[i] = ktime_sub(ktime_get(), dt_[i]);
preempt_enable();
}
@@ -537,8 +537,8 @@ static u64 __measure_frequency(u32 *cntr, int duration_ms)
{
u64 dc, dt;
- dt = ktime_get();
dc = READ_ONCE(*cntr);
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dc = READ_ONCE(*cntr) - dc;
dt = ktime_get() - dt;
@@ -566,8 +566,8 @@ static u64 __measure_cs_frequency(struct intel_engine_cs *engine,
{
u64 dc, dt;
- dt = ktime_get();
dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0));
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)) - dc;
dt = ktime_get() - dt;
@@ -1094,8 +1094,8 @@ static u64 __measure_power(int duration_ms)
{
u64 dE, dt;
- dt = ktime_get();
dE = librapl_energy_uJ();
+ dt = ktime_get();
usleep_range(1000 * duration_ms, 2000 * duration_ms);
dE = librapl_energy_uJ() - dE;
dt = ktime_get() - dt;
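
The reordering in the selftest hunks above follows one pattern: read the counter and the timestamp in the same order at both ends of the interval, so the latency of the slow counter read lands symmetrically and largely cancels out of the computed rate. A minimal sketch of the corrected ordering (read_counter() is a stand-in for the engine-timestamp, EI-counter, GPR and RAPL reads used above):

u64 c0, c1;
ktime_t t0, t1;

c0 = read_counter();	/* counter first ... */
t0 = ktime_get();	/* ... then the timestamp */
usleep_range(1000, 2000);
c1 = read_counter();	/* close the interval in the same order */
t1 = ktime_get();
/* rate = (c1 - c0) / ktime_to_ns(ktime_sub(t1, t0)) */
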
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
new file mode 100644
index 000000000000..e6cac1f15d6e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_region.h"
+
+#include "gen8_engine_cs.h"
+#include "i915_gem_ww.h"
+#include "intel_engine_regs.h"
+#include "intel_gpu_commands.h"
+#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_ring.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
+
+static void vma_set_qw(struct i915_vma *vma, u64 addr, u64 val)
+{
+ GEM_BUG_ON(addr < i915_vma_offset(vma));
+ GEM_BUG_ON(addr >= i915_vma_offset(vma) + i915_vma_size(vma) + sizeof(val));
+ memset64(page_mask_bits(vma->obj->mm.mapping) +
+ (addr - i915_vma_offset(vma)), val, 1);
+}
+
+static int
+pte_tlbinv(struct intel_context *ce,
+ struct i915_vma *va,
+ struct i915_vma *vb,
+ u64 align,
+ void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length),
+ u64 length,
+ struct rnd_state *prng)
+{
+ struct drm_i915_gem_object *batch;
+ struct drm_mm_node vb_node;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u64 addr;
+ int err;
+ u32 *cs;
+
+ batch = i915_gem_object_create_internal(ce->vm->i915, 4096);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ vma = i915_vma_instance(batch, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out;
+
+ /* Pin va at random but aligned offset after vma */
+ addr = round_up(vma->node.start + vma->node.size, align);
+ /* MI_CONDITIONAL_BATCH_BUFFER_END limits address to 48b */
+ addr = igt_random_offset(prng, addr, min(ce->vm->total, BIT_ULL(48)),
+ va->size, align);
+ err = i915_vma_pin(va, 0, 0, addr | PIN_OFFSET_FIXED | PIN_USER);
+ if (err) {
+ pr_err("Cannot pin at %llx+%llx\n", addr, va->size);
+ goto out;
+ }
+ GEM_BUG_ON(i915_vma_offset(va) != addr);
+ if (vb != va) {
+ vb_node = vb->node;
+ vb->node = va->node; /* overwrites the _same_ PTE */
+ }
+
+ /*
+ * Now choose a random dword within the first pinned page.
+ *
+ * SZ_64K pages on dg1 require that the whole PT be marked as
+ * containing 64KiB entries. So we make sure that the vma covers
+ * the whole PT, despite being randomly aligned to 64KiB, and
+ * restrict our sampling to the 2MiB PT within which we know we
+ * will be using 64KiB pages.
+ */
+ if (align == SZ_64K)
+ addr = round_up(addr, SZ_2M);
+ addr = igt_random_offset(prng, addr, addr + align, 8, 8);
+
+ if (va != vb)
+ pr_info("%s(%s): Sampling %llx, with alignment %llx, using PTE size %x (phys %x, sg %x), invalidate:%llx+%llx\n",
+ ce->engine->name, va->obj->mm.region->name ?: "smem",
+ addr, align, va->resource->page_sizes_gtt,
+ va->page_sizes.phys, va->page_sizes.sg,
+ addr & -length, length);
+
+ cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
+ *cs++ = MI_NOOP; /* for later termination */
+ /*
+ * Sample the target to see if we spot the updated backing store.
+ * Gen8 VCS compares the immediate value with the bitwise-and of two
+ * consecutive DWORDs pointed to by addr; other gens/engines compare
+ * the value with the single DWORD pointed to by addr. Moreover we
+ * want to exercise DWORD-sized invalidations. The values below were
+ * chosen to fulfill all of these requirements.
+ */
+ *cs++ = MI_CONDITIONAL_BATCH_BUFFER_END | MI_DO_COMPARE | 2;
+ *cs++ = 0; /* break if *addr == 0 */
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+ vma_set_qw(va, addr, -1);
+ vma_set_qw(vb, addr, 0);
+
+ /* Keep sampling until we get bored */
+ *cs++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+ *cs++ = lower_32_bits(i915_vma_offset(vma));
+ *cs++ = upper_32_bits(i915_vma_offset(vma));
+
+ i915_gem_object_flush_map(batch);
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_va;
+ }
+
+ err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
+ if (err) {
+ i915_request_add(rq);
+ goto out_va;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /* Short sleep to sanitycheck the batch is spinning before we begin */
+ msleep(10);
+ if (va == vb) {
+ if (!i915_request_completed(rq)) {
+ pr_err("%s(%s): Semaphore sanitycheck failed %llx, with alignment %llx, using PTE size %x (phys %x, sg %x)\n",
+ ce->engine->name, va->obj->mm.region->name ?: "smem",
+ addr, align, va->resource->page_sizes_gtt,
+ va->page_sizes.phys, va->page_sizes.sg);
+ err = -EIO;
+ }
+ } else if (!i915_request_completed(rq)) {
+ struct i915_vma_resource vb_res = {
+ .bi.pages = vb->obj->mm.pages,
+ .bi.page_sizes = vb->obj->mm.page_sizes,
+ .start = i915_vma_offset(vb),
+ .vma_size = i915_vma_size(vb)
+ };
+ unsigned int pte_flags = 0;
+
+ /* Flip the PTE between A and B */
+ if (i915_gem_object_is_lmem(vb->obj))
+ pte_flags |= PTE_LM;
+ ce->vm->insert_entries(ce->vm, &vb_res, 0, pte_flags);
+
+ /* Flush the PTE update to concurrent HW */
+ tlbinv(ce->vm, addr & -length, length);
+
+ if (wait_for(i915_request_completed(rq), HZ / 2)) {
+ pr_err("%s: Request did not complete; the COND_BBE did not read the updated PTE\n",
+ ce->engine->name);
+ err = -EINVAL;
+ }
+ } else {
+ pr_err("Spinner ended unexpectedly\n");
+ err = -EIO;
+ }
+ i915_request_put(rq);
+
+ cs = page_mask_bits(batch->mm.mapping);
+ *cs = MI_BATCH_BUFFER_END;
+ wmb();
+
+out_va:
+ if (vb != va)
+ vb->node = vb_node;
+ i915_vma_unpin(va);
+ if (i915_vma_unbind_unlocked(va))
+ err = -EIO;
+out:
+ i915_gem_object_put(batch);
+ return err;
+}
+
+static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
+{
+ /*
+ * Allocating the largest possible page size allows us to test all
+ * types of pages.
+ */
+ return i915_gem_object_create_lmem(gt->i915, SZ_1G, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static struct drm_i915_gem_object *create_smem(struct intel_gt *gt)
+{
+ /*
+ * SZ_64K pages require covering the whole 2M PT (gen8 to tgl/dg1).
+ * While that does not require the whole 2M block to be contiguous,
+ * it is easier to make it so, since we need that for SZ_2M pages.
+ * Since we randomly offset the start of the vma, we need a 4M object
+ * so that there is a 2M range within it that is suitable for SZ_64K
+ * PTEs.
+ */
+ return i915_gem_object_create_internal(gt->i915, SZ_4M);
+}
+
+static int
+mem_tlbinv(struct intel_gt *gt,
+ struct drm_i915_gem_object *(*create_fn)(struct intel_gt *),
+ void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length))
+{
+ unsigned int ppgtt_size = RUNTIME_INFO(gt->i915)->ppgtt_size;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *A, *B;
+ struct i915_ppgtt *ppgtt;
+ struct i915_vma *va, *vb;
+ enum intel_engine_id id;
+ I915_RND_STATE(prng);
+ void *vaddr;
+ int err;
+
+ /*
+ * Check that the TLB invalidate is able to revoke an active
+ * page. We load a page into a spinning COND_BBE loop and then
+ * remap that page to a new physical address. The old address is
+ * retained in the TLB cache, so the loop keeps spinning, until we
+ * issue an invalidate.
+ */
+
+ A = create_fn(gt);
+ if (IS_ERR(A))
+ return PTR_ERR(A);
+
+ vaddr = i915_gem_object_pin_map_unlocked(A, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_a;
+ }
+
+ B = create_fn(gt);
+ if (IS_ERR(B)) {
+ err = PTR_ERR(B);
+ goto out_a;
+ }
+
+ vaddr = i915_gem_object_pin_map_unlocked(B, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_b;
+ }
+
+ GEM_BUG_ON(A->base.size != B->base.size);
+ if ((A->mm.page_sizes.phys | B->mm.page_sizes.phys) & (A->base.size - 1))
+ pr_warn("Failed to allocate contiguous pages for size %zx\n",
+ A->base.size);
+
+ ppgtt = i915_ppgtt_create(gt, 0);
+ if (IS_ERR(ppgtt)) {
+ err = PTR_ERR(ppgtt);
+ goto out_b;
+ }
+
+ va = i915_vma_instance(A, &ppgtt->vm, NULL);
+ if (IS_ERR(va)) {
+ err = PTR_ERR(va);
+ goto out_vm;
+ }
+
+ vb = i915_vma_instance(B, &ppgtt->vm, NULL);
+ if (IS_ERR(vb)) {
+ err = PTR_ERR(vb);
+ goto out_vm;
+ }
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct i915_gem_ww_ctx ww;
+ struct intel_context *ce;
+ int bit;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(&ppgtt->vm);
+
+ for_i915_gem_ww(&ww, err, true)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto err_put;
+
+ for_each_set_bit(bit,
+ (unsigned long *)&RUNTIME_INFO(gt->i915)->page_sizes,
+ BITS_PER_TYPE(RUNTIME_INFO(gt->i915)->page_sizes)) {
+ unsigned int len;
+
+ if (BIT_ULL(bit) < i915_vm_obj_min_alignment(va->vm, va->obj))
+ continue;
+
+ /* sanitycheck the semaphore wake up */
+ err = pte_tlbinv(ce, va, va,
+ BIT_ULL(bit),
+ NULL, SZ_4K,
+ &prng);
+ if (err)
+ goto err_unpin;
+
+ for (len = 2; len <= ppgtt_size; len = min(2 * len, ppgtt_size)) {
+ err = pte_tlbinv(ce, va, vb,
+ BIT_ULL(bit),
+ tlbinv,
+ BIT_ULL(len),
+ &prng);
+ if (err)
+ goto err_unpin;
+ if (len == ppgtt_size)
+ break;
+ }
+ }
+err_unpin:
+ intel_context_unpin(ce);
+err_put:
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+out_vm:
+ i915_vm_put(&ppgtt->vm);
+out_b:
+ i915_gem_object_put(B);
+out_a:
+ i915_gem_object_put(A);
+ return err;
+}
+
+static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length)
+{
+ intel_gt_invalidate_tlb(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
+}
+
+static int invalidate_full(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int err;
+
+ if (GRAPHICS_VER(gt->i915) < 8)
+ return 0; /* TLB invalidate not implemented */
+
+ err = mem_tlbinv(gt, create_smem, tlbinv_full);
+ if (err == 0)
+ err = mem_tlbinv(gt, create_lmem, tlbinv_full);
+ if (err == -ENODEV || err == -ENXIO)
+ err = 0;
+
+ return err;
+}
+
+int intel_tlb_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(invalidate_full),
+ };
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i) {
+ int err;
+
+ if (intel_gt_is_wedged(gt))
+ continue;
+
+ err = intel_gt_live_subtests(tests, gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
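
Not visible in this hunk: live selftests are discovered through the selftest() registration table, so a matching entry following the existing pattern in i915_live_selftests.h is expected alongside this file, along the lines of:

/* drivers/gpu/drm/i915/i915_live_selftests.h (illustrative placement) */
selftest(tlb, intel_tlb_live_selftests)
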
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index f2d9858d827c..021f51d9b456 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -24,37 +24,37 @@ static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+ return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
}
-static struct kobj_attribute name_attr =
+static const struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);
static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+ return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}
-static struct kobj_attribute class_attr =
+static const struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);
static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+ return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}
-static struct kobj_attribute inst_attr =
+static const struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);
static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+ return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}
-static struct kobj_attribute mmio_attr =
+static const struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);
static const char * const vcs_caps[] = {
@@ -107,11 +107,9 @@ __caps_show(struct intel_engine_cs *engine,
for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
if (n >= count || !repr[n]) {
if (GEM_WARN_ON(show_unknown))
- len += snprintf(buf + len, PAGE_SIZE - len,
- "[%x] ", n);
+ len += sysfs_emit_at(buf, len, "[%x] ", n);
} else {
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s ", repr[n]);
+ len += sysfs_emit_at(buf, len, "%s ", repr[n]);
}
if (GEM_WARN_ON(len >= PAGE_SIZE))
break;
@@ -127,7 +125,7 @@ caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
return __caps_show(engine, engine->uabi_capabilities, buf, true);
}
-static struct kobj_attribute caps_attr =
+static const struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);
static ssize_t
@@ -136,7 +134,7 @@ all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}
-static struct kobj_attribute all_caps_attr =
+static const struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
static ssize_t
@@ -182,10 +180,10 @@ max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+ return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}
-static struct kobj_attribute max_spin_attr =
+static const struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
static ssize_t
@@ -193,10 +191,10 @@ max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}
-static struct kobj_attribute max_spin_def =
+static const struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
static ssize_t
@@ -236,10 +234,10 @@ timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
}
-static struct kobj_attribute timeslice_duration_attr =
+static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
static ssize_t
@@ -247,10 +245,10 @@ timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}
-static struct kobj_attribute timeslice_duration_def =
+static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
static ssize_t
@@ -287,10 +285,10 @@ stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
}
-static struct kobj_attribute stop_timeout_attr =
+static const struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
static ssize_t
@@ -298,10 +296,10 @@ stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}
-static struct kobj_attribute stop_timeout_def =
+static const struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
static ssize_t
@@ -343,10 +341,10 @@ preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
}
-static struct kobj_attribute preempt_timeout_attr =
+static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
static ssize_t
@@ -355,10 +353,10 @@ preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}
-static struct kobj_attribute preempt_timeout_def =
+static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
static ssize_t
@@ -399,10 +397,10 @@ heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+ return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}
-static struct kobj_attribute heartbeat_interval_attr =
+static const struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
static ssize_t
@@ -410,10 +408,10 @@ heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
- return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
+ return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}
-static struct kobj_attribute heartbeat_interval_def =
+static const struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
static void kobj_engine_release(struct kobject *kobj)
@@ -421,7 +419,7 @@ static void kobj_engine_release(struct kobject *kobj)
kfree(kobj);
}
-static struct kobj_type kobj_engine_type = {
+static const struct kobj_type kobj_engine_type = {
.release = kobj_engine_release,
.sysfs_ops = &kobj_sysfs_ops
};
@@ -449,7 +447,7 @@ kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
static void add_defaults(struct kobj_engine *parent)
{
- static const struct attribute *files[] = {
+ static const struct attribute * const files[] = {
&max_spin_def.attr,
&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
@@ -485,7 +483,7 @@ static void add_defaults(struct kobj_engine *parent)
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
- static const struct attribute *files[] = {
+ static const struct attribute * const files[] = {
&name_attr.attr,
&class_attr.attr,
&inst_attr.attr,
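
Unlike a bare sprintf(), sysfs_emit() checks that buf is a page-aligned sysfs buffer and clamps output to PAGE_SIZE, and sysfs_emit_at() does the same while appending at an offset; that is what lets __caps_show() above drop its manual "PAGE_SIZE - len" arithmetic. A minimal show() callback in the same shape as the converted ones:

static ssize_t example_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	/* returns the number of bytes written, never more than PAGE_SIZE */
	return sysfs_emit(buf, "%d\n", 42);
}
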
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index e73d4440c5e8..1d9fdfb11268 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -6,6 +6,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "gt/intel_ring.h"
#include "intel_gsc_fw.h"
@@ -88,9 +89,8 @@ out_rq:
i915_request_put(rq);
if (err)
- drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
- "Request submission for GSC load failed (%d)\n",
- err);
+ gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
+ ERR_PTR(err));
return err;
}
@@ -200,8 +200,7 @@ int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
/* FW is not fully operational until we enable SW proxy */
intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
- drm_info(&gt->i915->drm, "Loaded GSC firmware %s\n",
- gsc_fw->file_selected.path);
+ gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path);
return 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index fd21dbd2663b..2d5b70b3384c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_fw.h"
#include "i915_drv.h"
@@ -59,7 +60,6 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
{
static struct lock_class_key gsc_lock;
struct intel_gt *gt = gsc_uc_to_gt(gsc);
- struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine = gt->engine[GSC0];
struct intel_context *ce;
struct i915_vma *vma;
@@ -81,8 +81,7 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
I915_GEM_HWS_GSC_ADDR,
&gsc_lock, "gsc_context");
if (IS_ERR(ce)) {
- drm_err(&gt->i915->drm,
- "failed to create GSC CS ctx for FW communication\n");
+ gt_err(gt, "failed to create GSC CS ctx for FW communication\n");
err = PTR_ERR(ce);
goto out_vma;
}
@@ -98,7 +97,7 @@ out_vma:
out_fw:
intel_uc_fw_fini(&gsc->fw);
out:
- i915_probe_error(i915, "failed with %d\n", err);
+ gt_probe_error(gt, "GSC init failed %pe\n", ERR_PTR(err));
return err;
}
@@ -117,7 +116,7 @@ void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
intel_uc_fw_fini(&gsc->fw);
}
-void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc)
+void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc)
{
if (!intel_uc_fw_is_loadable(&gsc->fw))
return;
@@ -125,6 +124,25 @@ void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc)
flush_work(&gsc->work);
}
+void intel_gsc_uc_resume(struct intel_gsc_uc *gsc)
+{
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return;
+
+ /*
+ * We only want to start the GSC worker from here in the actual resume
+ * flow and not during driver load. This is because GSC load is slow and
+ * we therefore want to make sure that the default state init completes
+ * first so as not to slow down the init thread. A separate call to
+ * intel_gsc_uc_load_start will ensure that the GSC is loaded during
+ * driver load.
+ */
+ if (!gsc_uc_to_gt(gsc)->engine[GSC0]->default_state)
+ return;
+
+ intel_gsc_uc_load_start(gsc);
+}
+
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
{
if (!intel_uc_fw_is_loadable(&gsc->fw))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
index 03fd0a8e8db1..5f50fa1ff8b9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
@@ -26,6 +26,8 @@ void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc);
int intel_gsc_uc_init(struct intel_gsc_uc *gsc);
void intel_gsc_uc_fini(struct intel_gsc_uc *gsc);
void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_resume(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc);
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc);
static inline bool intel_gsc_uc_is_supported(struct intel_gsc_uc *gsc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
new file mode 100644
index 000000000000..ea0da06e2f39
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_ring.h"
+#include "intel_gsc_uc_heci_cmd_submit.h"
+
+struct gsc_heci_pkt {
+ u64 addr_in;
+ u32 size_in;
+ u64 addr_out;
+ u32 size_out;
+};
+
+static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GSC_HECI_CMD_PKT;
+ *cs++ = lower_32_bits(pkt->addr_in);
+ *cs++ = upper_32_bits(pkt->addr_in);
+ *cs++ = pkt->size_in;
+ *cs++ = lower_32_bits(pkt->addr_out);
+ *cs++ = upper_32_bits(pkt->addr_out);
+ *cs++ = pkt->size_out;
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc, u64 addr_in,
+ u32 size_in, u64 addr_out,
+ u32 size_out)
+{
+ struct intel_context *ce = gsc->ce;
+ struct i915_request *rq;
+ struct gsc_heci_pkt pkt = {
+ .addr_in = addr_in,
+ .size_in = size_in,
+ .addr_out = addr_out,
+ .size_out = size_out
+ };
+ int err;
+
+ if (!ce)
+ return -ENODEV;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ err = emit_gsc_heci_pkt(rq, &pkt);
+
+ if (err)
+ goto out_rq;
+
+ err = ce->engine->emit_flush(rq, 0);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ i915_request_add(rq);
+
+ if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ if (err)
+ drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
+ "Request submission for GSC heci cmd failed (%d)\n",
+ err);
+
+ return err;
+}
+
+void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
+ u8 heci_client_id, u32 message_size,
+ u64 host_session_id)
+{
+ host_session_id &= ~HOST_SESSION_MASK;
+ if (heci_client_id == HECI_MEADDRESS_PXP)
+ host_session_id |= HOST_SESSION_PXP_SINGLE;
+
+ header->validity_marker = GSC_HECI_VALIDITY_MARKER;
+ header->heci_client_id = heci_client_id;
+ header->host_session_handle = host_session_id;
+ header->header_version = MTL_GSC_HEADER_VERSION;
+ header->message_size = message_size;
+}
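
A hypothetical caller sketch for the two helpers above (buffer allocation and GGTT mapping elided; the my_* names are invented): the MTL header is written at the start of the CPU-mapped input buffer, then the GGTT addresses of the in/out buffers are handed to the submission helper, which builds and waits on the HECI packet request.

struct intel_gsc_mtl_header *hdr = my_in_cpu_addr;
int err;

intel_gsc_uc_heci_cmd_emit_mtl_header(hdr, HECI_MEADDRESS_PXP,
				      my_msg_size, my_session_id);
err = intel_gsc_uc_heci_cmd_submit_packet(gsc, my_in_ggtt_addr, my_msg_size,
					  my_out_ggtt_addr, my_out_size);
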
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
new file mode 100644
index 000000000000..3d56ae501991
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_
+#define _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_
+
+#include <linux/types.h>
+
+struct intel_gsc_uc;
+struct intel_gsc_mtl_header {
+ u32 validity_marker;
+#define GSC_HECI_VALIDITY_MARKER 0xA578875A
+
+ u8 heci_client_id;
+#define HECI_MEADDRESS_PXP 17
+#define HECI_MEADDRESS_HDCP 18
+
+ u8 reserved1;
+
+ u16 header_version;
+#define MTL_GSC_HEADER_VERSION 1
+
+ /*
+ * FW allows the host to decide the host_session handle
+ * as it sees fit.
+ * For traceability, select bits (60-63) are reserved
+ * to differentiate the caller-target subsystem:
+ * 0000 - HDCP
+ * 0001 - PXP Single Session
+ */
+ u64 host_session_handle;
+#define HOST_SESSION_MASK REG_GENMASK64(63, 60)
+#define HOST_SESSION_PXP_SINGLE BIT_ULL(60)
+ u64 gsc_message_handle;
+
+ u32 message_size; /* lower 20 bits only, upper 12 are reserved */
+
+ /*
+ * Flags mask:
+ * Bit 0: Pending
+ * Bit 1: Session Cleanup
+ * Bits 2-15: Flags
+ * Bits 16-31: Extension Size
+ * According to the internal spec, flags are either input or output;
+ * we distinguish them using an OUTFLAG or INFLAG prefix.
+ */
+ u32 flags;
+#define GSC_OUTFLAG_MSG_PENDING 1
+
+ u32 status;
+} __packed;
+
+int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc,
+ u64 addr_in, u32 size_in,
+ u64 addr_out, u32 size_out);
+void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
+ u8 heci_client_id, u32 message_size,
+ u64 host_session_id);
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index fc3b994626a4..cf49188db6a6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -15,6 +15,7 @@
#include "guc_capture_fwif.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
+#include "intel_guc_print.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
@@ -353,7 +354,6 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
u32 ipver)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct sseu_dev_info *sseu;
int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0;
const struct __guc_mmio_reg_descr_group *list;
@@ -402,7 +402,7 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
}
}
- drm_dbg(&i915->drm, "GuC-capture found %d-ext-regs.\n", num_tot_regs);
+ guc_dbg(guc, "capture found %d ext-regs.\n", num_tot_regs);
guc->capture->extlists = extlists;
}
@@ -477,7 +477,6 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
struct guc_mmio_reg *ptr, u16 num_entries)
{
u32 i = 0, j = 0;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
const struct __guc_mmio_reg_descr_group *match;
@@ -509,8 +508,7 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
}
}
if (i < num_entries)
- drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out %d.\n",
- (int)i, (int)num_entries);
+ guc_dbg(guc, "Got short capture reglist init: %d out of %d.\n", i, num_entries);
return 0;
}
@@ -540,12 +538,11 @@ guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
size_t *size, bool is_purpose_est)
{
struct intel_guc_state_capture *gc = guc->capture;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
int num_regs;
if (!gc->reglists) {
- drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n");
+ guc_warn(guc, "No capture reglist for this device\n");
return -ENODEV;
}
@@ -557,9 +554,9 @@ guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
!guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
- drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n");
+ guc_warn(guc, "Missing capture reglist: global!\n");
else
- drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n",
+ guc_warn(guc, "Missing capture reglist: %s(%u):%s(%u)!\n",
__stringify_type(type), type,
__stringify_engclass(classid), classid);
return -ENODATA;
@@ -592,7 +589,6 @@ intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classi
{
struct intel_guc_state_capture *gc = guc->capture;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_debug_capture_list *listnode;
int ret, num_regs;
u8 *caplist, *tmp;
@@ -623,7 +619,7 @@ intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classi
caplist = kzalloc(size, GFP_KERNEL);
if (!caplist) {
- drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached caplist");
+ guc_dbg(guc, "Failed to alloc cached register capture list");
return -ENOMEM;
}
@@ -653,7 +649,6 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
void **outptr, size_t *size)
{
struct intel_guc_state_capture *gc = guc->capture;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int tmp = sizeof(u32) * 4;
void *null_header;
@@ -665,7 +660,7 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
null_header = kzalloc(tmp, GFP_KERNEL);
if (!null_header) {
- drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached nulllist");
+ guc_dbg(guc, "Failed to alloc cached register capture null list");
return -ENOMEM;
}
@@ -727,7 +722,6 @@ guc_capture_output_min_size_est(struct intel_guc *guc)
static void check_guc_capture_size(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int min_size = guc_capture_output_min_size_est(guc);
int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
@@ -741,13 +735,13 @@ static void check_guc_capture_size(struct intel_guc *guc)
* INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
*/
if (min_size < 0)
- drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+ guc_warn(guc, "Failed to calculate error state capture buffer minimum size: %d!\n",
min_size);
else if (min_size > buffer_size)
- drm_warn(&i915->drm, "GuC error state capture buffer maybe small: %d < %d\n",
+ guc_warn(guc, "Error state capture buffer maybe small: %d < %d\n",
buffer_size, min_size);
else if (spare_size > buffer_size)
- drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n",
+ guc_dbg(guc, "Error state capture buffer lacks spare size: %d < %d (min = %d)\n",
buffer_size, spare_size, min_size);
}
@@ -848,7 +842,6 @@ static int
guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
u32 *dw)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int tries = 2;
int avail = 0;
u32 *src_data;
@@ -865,7 +858,7 @@ guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *
return 4;
}
if (avail)
- drm_dbg(&i915->drm, "GuC-Cap-Logs not dword aligned, skipping.\n");
+ guc_dbg(guc, "Register capture log not dword aligned, skipping.\n");
buf->rd = 0;
}
@@ -1118,13 +1111,12 @@ static void
__guc_capture_create_prealloc_nodes(struct intel_guc *guc)
{
struct __guc_capture_parsed_output *node = NULL;
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int i;
for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
node = guc_capture_alloc_one_node(guc);
if (!node) {
- drm_warn(&i915->drm, "GuC Capture pre-alloc-cache failure\n");
+ guc_warn(guc, "Register capture pre-alloc-cache failure\n");
/* don't free the priors, use what we got and clean up at shutdown */
return;
}
@@ -1169,7 +1161,6 @@ guc_capture_create_prealloc_nodes(struct intel_guc *guc)
static int
guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_state_capture_group_header_t ghdr = {0};
struct guc_state_capture_header_t hdr = {0};
struct __guc_capture_parsed_output *node = NULL;
@@ -1183,7 +1174,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
if (!i)
return -ENODATA;
if (i % sizeof(u32)) {
- drm_warn(&i915->drm, "GuC Capture new entries unaligned\n");
+ guc_warn(guc, "Got mis-aligned register capture entries\n");
ret = -EIO;
goto bailout;
}
@@ -1301,7 +1292,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
break;
}
if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
- drm_dbg(&i915->drm, "GuC Capture missing global dump: %08x!\n",
+ guc_dbg(guc, "Register capture missing global dump: %08x!\n",
datatype);
}
node->is_partial = is_partial;
@@ -1322,7 +1313,7 @@ guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstat
numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
if (numregs > guc->capture->max_mmio_per_node) {
- drm_dbg(&i915->drm, "GuC Capture list extraction clipped by prealloc!\n");
+ guc_dbg(guc, "Register capture list extraction clipped by prealloc!\n");
numregs = guc->capture->max_mmio_per_node;
}
node->reginfo[datatype].num_regs = numregs;
@@ -1367,7 +1358,6 @@ static void __guc_capture_process_output(struct intel_guc *guc)
{
unsigned int buffer_size, read_offset, write_offset, full_count;
struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct guc_log_buffer_state log_buf_state_local;
struct guc_log_buffer_state *log_buf_state;
struct __guc_capture_bufstate buf;
@@ -1403,7 +1393,8 @@ static void __guc_capture_process_output(struct intel_guc *guc)
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
- drm_err(&i915->drm, "invalid GuC log capture buffer state!\n");
+ guc_err(guc, "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n",
+ read_offset, buffer_size);
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
@@ -1571,6 +1562,27 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
#endif //CONFIG_DRM_I915_CAPTURE_ERROR
+static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
+{
+ struct gcap_reg_list_info *reginfo;
+ struct guc_mmio_reg *regs;
+ i915_reg_t reg_ipehr = RING_IPEHR(0);
+ i915_reg_t reg_instdone = RING_INSTDONE(0);
+ int i;
+
+ if (!ee->guc_capture_node)
+ return;
+
+ reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
+ regs = reginfo->regs;
+ for (i = 0; i < reginfo->num_regs; i++) {
+ if (regs[i].offset == reg_ipehr.reg)
+ ee->ipehr = regs[i].value;
+ else if (regs[i].offset == reg_instdone.reg)
+ ee->instdone.instdone = regs[i].value;
+ }
+}
+
void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
{
if (!ee || !ee->guc_capture_node)
@@ -1586,13 +1598,11 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
struct intel_context *ce)
{
struct __guc_capture_parsed_output *n, *ntmp;
- struct drm_i915_private *i915;
struct intel_guc *guc;
if (!gt || !ee || !ce)
return;
- i915 = gt->i915;
guc = &gt->uc.guc;
if (!guc->capture)
return;
@@ -1606,16 +1616,18 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
- n->guc_id && n->guc_id == ce->guc_id.id &&
- (n->lrca & CTX_GTT_ADDRESS_MASK) && (n->lrca & CTX_GTT_ADDRESS_MASK) ==
- (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
+ n->guc_id == ce->guc_id.id &&
+ (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
list_del(&n->link);
ee->guc_capture_node = n;
ee->guc_capture = guc->capture;
+ guc_capture_find_ecode(ee);
return;
}
}
- drm_dbg(&i915->drm, "GuC capture can't match ee to node\n");
+
+ guc_warn(guc, "No register capture node found for 0x%04X / 0x%08X\n",
+ ce->guc_id.id, ce->lrc.lrca);
}
void intel_guc_capture_process(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index c3792ddeec80..195db8c9d420 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -333,8 +333,7 @@ bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
log->stats[type].sampled_overflow += 16;
}
- dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
- "GuC log buffer overflow\n");
+ guc_notice_ratelimited(log_to_guc(log), "log buffer overflow\n");
}
return overflow;
@@ -521,7 +520,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
@@ -544,9 +543,9 @@ static int guc_log_relay_create(struct intel_guc_log *log)
n_subbufs = 8;
guc_log_relay_chan = relay_open("guc_log",
- dev_priv->drm.primary->debugfs_root,
+ i915->drm.primary->debugfs_root,
subbuf_size, n_subbufs,
- &relay_callbacks, dev_priv);
+ &relay_callbacks, i915);
if (!guc_log_relay_chan) {
guc_err(guc, "Couldn't create relay channel for logging\n");
@@ -571,7 +570,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
_guc_log_copy_debuglogs_for_relay(log);
@@ -580,7 +579,7 @@ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
guc_action_flush_log_complete(guc);
}
@@ -662,7 +661,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
int ret = 0;
@@ -676,12 +675,12 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&i915->drm.struct_mutex);
if (log->level == level)
goto out_unlock;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
ret = guc_action_control_log(guc,
GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
@@ -694,7 +693,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
log->level = level;
out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
index e75989d4ba06..2465d05638b4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
@@ -30,6 +30,9 @@
#define guc_err_ratelimited(_guc, _fmt, ...) \
guc_printk((_guc), err_ratelimited, _fmt, ##__VA_ARGS__)
+#define guc_notice_ratelimited(_guc, _fmt, ...) \
+ guc_printk((_guc), notice_ratelimited, _fmt, ##__VA_ARGS__)
+
#define guc_probe_error(_guc, _fmt, ...) \
guc_printk((_guc), probe_error, _fmt, ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index b5855091cf6a..1adec6de223c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -6,25 +6,15 @@
#include <linux/string_helpers.h>
#include "intel_guc_rc.h"
+#include "intel_guc_print.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
static bool __guc_rc_supported(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
-
- /*
- * Wa_14017073508: mtl
- * Do not enable gucrc to avoid additional interrupts which
- * may disrupt pcode wa.
- */
- if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
- gt->type == GT_MEDIA)
- return false;
-
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(gt->i915) >= 12;
+ GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
@@ -70,13 +60,12 @@ static int __guc_rc_control(struct intel_guc *guc, bool enable)
ret = guc_action_control_gucrc(guc, enable);
if (ret) {
- i915_probe_error(guc_to_gt(guc)->i915, "Failed to %s GuC RC (%pe)\n",
- str_enable_disable(enable), ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to %s RC (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
return ret;
}
- drm_info(&gt->i915->drm, "GuC RC: %s\n",
- str_enabled_disabled(enable));
+ guc_info(guc, "RC %s\n", str_enabled_disabled(enable));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 63464933cbce..026d73855f36 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -9,6 +9,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
+#include "intel_guc_print.h"
#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
@@ -171,14 +172,12 @@ static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
int ret;
ret = guc_action_slpc_query(guc, offset);
if (unlikely(ret))
- i915_probe_error(i915, "Failed to query task state (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to query task state: %pe\n", ERR_PTR(ret));
drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
@@ -188,15 +187,14 @@ static int slpc_query_task_state(struct intel_guc_slpc *slpc)
static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
int ret;
GEM_BUG_ON(id >= SLPC_MAX_PARAM);
ret = guc_action_slpc_set_param(guc, id, value);
if (ret)
- i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
- id, value, ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set param %d to %u: %pe\n",
+ id, value, ERR_PTR(ret));
return ret;
}
@@ -212,8 +210,8 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
struct intel_guc *guc = slpc_to_guc(slpc);
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret = 0;
@@ -236,9 +234,8 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
freq);
if (ret)
- drm_notice(&i915->drm,
- "Failed to send set_param for min freq(%d): (%d)\n",
- freq, ret);
+ guc_notice(guc, "Failed to send set_param for min freq(%d): %pe\n",
+ freq, ERR_PTR(ret));
}
return ret;
@@ -267,7 +264,6 @@ static void slpc_boost_work(struct work_struct *work)
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
int err;
@@ -275,9 +271,7 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
if (unlikely(err)) {
- i915_probe_error(i915,
- "Failed to allocate SLPC struct (err=%pe)\n",
- ERR_PTR(err));
+ guc_probe_error(guc, "Failed to allocate SLPC struct: %pe\n", ERR_PTR(err));
return err;
}
@@ -338,7 +332,6 @@ static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
static int slpc_reset(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
struct intel_guc *guc = slpc_to_guc(slpc);
u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
int ret;
@@ -346,15 +339,14 @@ static int slpc_reset(struct intel_guc_slpc *slpc)
ret = guc_action_slpc_reset(guc, offset);
if (unlikely(ret < 0)) {
- i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "SLPC reset action failed: %pe\n", ERR_PTR(ret));
return ret;
}
if (!ret) {
if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
- i915_probe_error(i915, "SLPC not enabled! State = %s\n",
- slpc_get_state_string(slpc));
+ guc_probe_error(guc, "SLPC not enabled! State = %s\n",
+ slpc_get_state_string(slpc));
return -EIO;
}
}
@@ -495,8 +487,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
val < slpc->rp1_freq);
if (ret) {
- i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(slpc_to_guc(slpc), "Failed to toggle efficient freq: %pe\n",
+ ERR_PTR(ret));
goto out;
}
@@ -611,15 +603,12 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
int slpc_min_freq;
int ret;
ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
if (ret) {
- drm_err(&i915->drm,
- "Failed to get min freq: (%d)\n",
- ret);
+ guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
return false;
}
@@ -685,9 +674,8 @@ int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
if (ret)
- drm_err(&i915->drm,
- "Override gucrc mode %d failed %d\n",
- mode, ret);
+ guc_err(slpc_to_guc(slpc), "Override RC mode %d failed: %pe\n",
+ mode, ERR_PTR(ret));
}
return ret;
@@ -702,9 +690,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
if (ret)
- drm_err(&i915->drm,
- "Unsetting gucrc mode failed %d\n",
- ret);
+ guc_err(slpc_to_guc(slpc), "Unsetting RC mode failed: %pe\n", ERR_PTR(ret));
}
return ret;
@@ -725,7 +711,7 @@ int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
*/
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
- struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct intel_guc *guc = slpc_to_guc(slpc);
int ret;
GEM_BUG_ON(!slpc->vma);
@@ -734,8 +720,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
ret = slpc_reset(slpc);
if (unlikely(ret < 0)) {
- i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "SLPC Reset event returned: %pe\n", ERR_PTR(ret));
return ret;
}
@@ -743,7 +728,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
if (unlikely(ret < 0))
return ret;
- intel_guc_pm_intrmsk_enable(to_gt(i915));
+ intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));
slpc_get_rp_values(slpc);
@@ -753,16 +738,14 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set SLPC max to RP0: %pe\n", ERR_PTR(ret));
return ret;
}
/* Revert SLPC min/max to softlimits if necessary */
ret = slpc_set_softlimits(slpc);
if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
- ERR_PTR(ret));
+ guc_probe_error(guc, "Failed to set SLPC softlimits: %pe\n", ERR_PTR(ret));
return ret;
}
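/*
 * The guc_err()/guc_notice()/guc_probe_error() calls introduced above come
 * from intel_guc_print.h. A minimal sketch of such a wrapper family,
 * layered on the gt_*() logging helpers the same way the huc_printk()
 * macros later in this patch are (the in-tree header may differ in detail):
 */
#define guc_printk(_guc, _level, _fmt, ...) \
	gt_##_level(guc_to_gt(_guc), "GUC: " _fmt, ##__VA_ARGS__)
#define guc_err(_guc, _fmt, ...) guc_printk((_guc), err, _fmt, ##__VA_ARGS__)
#define guc_notice(_guc, _fmt, ...) guc_printk((_guc), notice, _fmt, ##__VA_ARGS__)
#define guc_probe_error(_guc, _fmt, ...) guc_printk((_guc), probe_error, _fmt, ##__VA_ARGS__)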
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 53f3ed3244d5..88e881b100cf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1352,6 +1352,16 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
return ns_to_ktime(total);
}
+static void guc_enable_busyness_worker(struct intel_guc *guc)
+{
+ mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay);
+}
+
+static void guc_cancel_busyness_worker(struct intel_guc *guc)
+{
+ cancel_delayed_work_sync(&guc->timestamp.work);
+}
+
static void __reset_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1360,7 +1370,7 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
- cancel_delayed_work_sync(&guc->timestamp.work);
+ guc_cancel_busyness_worker(guc);
spin_lock_irqsave(&guc->timestamp.lock, flags);
@@ -1416,8 +1426,7 @@ static void guc_timestamp_ping(struct work_struct *wrk)
intel_gt_reset_unlock(gt, srcu);
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ guc_enable_busyness_worker(guc);
}
static int guc_action_enable_usage_stats(struct intel_guc *guc)
@@ -1432,20 +1441,26 @@ static int guc_action_enable_usage_stats(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static void guc_init_engine_stats(struct intel_guc *guc)
+static int guc_init_engine_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
+ int ret;
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ ret = guc_action_enable_usage_stats(guc);
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
- int ret = guc_action_enable_usage_stats(guc);
+ if (ret)
+ guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
+ else
+ guc_enable_busyness_worker(guc);
- if (ret)
- guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
- }
+ return ret;
+}
+
+static void guc_fini_engine_stats(struct intel_guc *guc)
+{
+ guc_cancel_busyness_worker(guc);
}
void intel_guc_busyness_park(struct intel_gt *gt)
@@ -1460,7 +1475,7 @@ void intel_guc_busyness_park(struct intel_gt *gt)
* and causes an unclaimed register access warning. Cancel the worker
* synchronously here.
*/
- cancel_delayed_work_sync(&guc->timestamp.work);
+ guc_cancel_busyness_worker(guc);
/*
* Before parking, we should sample engine busyness stats if we need to.
@@ -1487,8 +1502,7 @@ void intel_guc_busyness_unpark(struct intel_gt *gt)
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
- mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
- guc->timestamp.ping_delay);
+ guc_enable_busyness_worker(guc);
}
static inline bool
@@ -4102,9 +4116,11 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = guc_submit_request;
}
-static inline void guc_kernel_context_pin(struct intel_guc *guc,
- struct intel_context *ce)
+static inline int guc_kernel_context_pin(struct intel_guc *guc,
+ struct intel_context *ce)
{
+ int ret;
+
/*
* Note: we purposefully do not check the returns below because
* the registration can only fail if a reset is just starting.
@@ -4112,16 +4128,24 @@ static inline void guc_kernel_context_pin(struct intel_guc *guc,
* isn't happening and even if it did this code would be run again.
*/
- if (context_guc_id_invalid(ce))
- pin_guc_id(guc, ce);
+ if (context_guc_id_invalid(ce)) {
+ ret = pin_guc_id(guc, ce);
+
+ if (ret < 0)
+ return ret;
+ }
if (!test_bit(CONTEXT_GUC_INIT, &ce->flags))
guc_context_init(ce);
- try_context_registration(ce, true);
+ ret = try_context_registration(ce, true);
+ if (ret)
+ unpin_guc_id(guc, ce);
+
+ return ret;
}
-static inline void guc_init_lrc_mapping(struct intel_guc *guc)
+static inline int guc_init_submission(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
@@ -4148,9 +4172,17 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
struct intel_context *ce;
list_for_each_entry(ce, &engine->pinned_contexts_list,
- pinned_contexts_link)
- guc_kernel_context_pin(guc, ce);
+ pinned_contexts_link) {
+ int ret = guc_kernel_context_pin(guc, ce);
+
+ if (ret) {
+ /* No point in trying to clean up as i915 will wedge on failure */
+ return ret;
+ }
+ }
}
+
+ return 0;
}
static void guc_release(struct intel_engine_cs *engine)
@@ -4393,30 +4425,57 @@ static int guc_init_global_schedule_policy(struct intel_guc *guc)
return ret;
}
-void intel_guc_submission_enable(struct intel_guc *guc)
+static void guc_route_semaphores(struct intel_guc *guc, bool to_guc)
{
struct intel_gt *gt = guc_to_gt(guc);
+ u32 val;
- /* Enable and route to GuC */
- if (GRAPHICS_VER(gt->i915) >= 12)
- intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES,
- GUC_SEM_INTR_ROUTE_TO_GUC |
- GUC_SEM_INTR_ENABLE_ALL);
+ if (GRAPHICS_VER(gt->i915) < 12)
+ return;
+
+ if (to_guc)
+ val = GUC_SEM_INTR_ROUTE_TO_GUC | GUC_SEM_INTR_ENABLE_ALL;
+ else
+ val = 0;
- guc_init_lrc_mapping(guc);
- guc_init_engine_stats(guc);
- guc_init_global_schedule_policy(guc);
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, val);
}
-void intel_guc_submission_disable(struct intel_guc *guc)
+int intel_guc_submission_enable(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ /* Semaphore interrupt enable and route to GuC */
+ guc_route_semaphores(guc, true);
- /* Note: By the time we're here, GuC may have already been reset */
+ ret = guc_init_submission(guc);
+ if (ret)
+ goto fail_sem;
+
+ ret = guc_init_engine_stats(guc);
+ if (ret)
+ goto fail_sem;
+
+ ret = guc_init_global_schedule_policy(guc);
+ if (ret)
+ goto fail_stats;
+
+ return 0;
+
+fail_stats:
+ guc_fini_engine_stats(guc);
+fail_sem:
+ guc_route_semaphores(guc, false);
+ return ret;
+}
+
+/* Note: By the time we're here, GuC may have already been reset */
+void intel_guc_submission_disable(struct intel_guc *guc)
+{
+ guc_cancel_busyness_worker(guc);
- /* Disable and route to host */
- if (GRAPHICS_VER(gt->i915) >= 12)
- intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, 0x0);
+ /* Semaphore interrupt disable and route to host */
+ guc_route_semaphores(guc, false);
}
static bool __guc_submission_supported(struct intel_guc *guc)
@@ -4660,9 +4719,10 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- drm_dbg(&guc_to_gt(guc)->i915->drm, "Got GuC reset of 0x%04X, exiting = %d, banned = %d\n",
- ce->guc_id.id, test_bit(CONTEXT_EXITING, &ce->flags),
- test_bit(CONTEXT_BANNED, &ce->flags));
+ guc_dbg(guc, "Got context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
+ ce->guc_id.id, ce->engine->name,
+ str_yes_no(intel_context_is_exiting(ce)),
+ str_yes_no(intel_context_is_banned(ce)));
if (likely(intel_context_is_schedulable(ce))) {
capture_error_state(guc, ce);
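/*
 * guc_enable_busyness_worker()/guc_cancel_busyness_worker() above fold a
 * repeated delayed-work idiom into one place. A self-contained sketch of
 * that idiom, with a hypothetical sampler struct standing in for the GuC
 * timestamp state:
 */
#include <linux/workqueue.h>

struct sampler {
	struct delayed_work work;
	unsigned long ping_delay;	/* re-arm period, in jiffies */
};

static void sampler_arm(struct sampler *s)
{
	/* queue the work, or move an already-pending instance */
	mod_delayed_work(system_highpri_wq, &s->work, s->ping_delay);
}

static void sampler_cancel(struct sampler *s)
{
	/* returns only after any in-flight callback has finished */
	cancel_delayed_work_sync(&s->work);
}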
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 5a95a9f0a8e3..c57b29cdb1a6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -15,7 +15,7 @@ struct intel_engine_cs;
void intel_guc_submission_init_early(struct intel_guc *guc);
int intel_guc_submission_init(struct intel_guc *guc);
-void intel_guc_submission_enable(struct intel_guc *guc);
+int intel_guc_submission_enable(struct intel_guc *guc);
void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 410905da8e97..72884e21470b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "i915_drv.h"
@@ -13,6 +14,15 @@
#include <linux/device/bus.h>
#include <linux/mei_aux.h>
+#define huc_printk(_huc, _level, _fmt, ...) \
+ gt_##_level(huc_to_gt(_huc), "HuC: " _fmt, ##__VA_ARGS__)
+#define huc_err(_huc, _fmt, ...) huc_printk((_huc), err, _fmt, ##__VA_ARGS__)
+#define huc_warn(_huc, _fmt, ...) huc_printk((_huc), warn, _fmt, ##__VA_ARGS__)
+#define huc_notice(_huc, _fmt, ...) huc_printk((_huc), notice, _fmt, ##__VA_ARGS__)
+#define huc_info(_huc, _fmt, ...) huc_printk((_huc), info, _fmt, ##__VA_ARGS__)
+#define huc_dbg(_huc, _fmt, ...) huc_printk((_huc), dbg, _fmt, ##__VA_ARGS__)
+#define huc_probe_error(_huc, _fmt, ...) huc_printk((_huc), probe_error, _fmt, ##__VA_ARGS__)
+
/**
* DOC: HuC
*
@@ -107,11 +117,9 @@ static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrti
if (!intel_huc_is_authenticated(huc)) {
if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
- drm_notice(&huc_to_gt(huc)->i915->drm,
- "timed out waiting for MEI GSC init to load HuC\n");
+ huc_notice(huc, "timed out waiting for MEI GSC\n");
else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
- drm_notice(&huc_to_gt(huc)->i915->drm,
- "timed out waiting for MEI PXP init to load HuC\n");
+ huc_notice(huc, "timed out waiting for MEI PXP\n");
else
MISSING_CASE(huc->delayed_load.status);
@@ -174,8 +182,7 @@ static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *d
case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
- drm_info(&huc_to_gt(huc)->i915->drm,
- "mei driver not bound, disabling HuC load\n");
+ huc_info(huc, "MEI driver not bound, disabling load\n");
gsc_init_error(huc);
break;
}
@@ -193,8 +200,7 @@ void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus
huc->delayed_load.nb.notifier_call = gsc_notifier;
ret = bus_register_notifier(bus, &huc->delayed_load.nb);
if (ret) {
- drm_err(&huc_to_gt(huc)->i915->drm,
- "failed to register GSC notifier\n");
+ huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
huc->delayed_load.nb.notifier_call = NULL;
gsc_init_error(huc);
}
@@ -306,29 +312,25 @@ static int check_huc_loading_mode(struct intel_huc *huc)
GSC_LOADS_HUC;
if (fw_needs_gsc != hw_uses_gsc) {
- drm_err(&gt->i915->drm,
- "mismatch between HuC FW (%s) and HW (%s) load modes\n",
- HUC_LOAD_MODE_STRING(fw_needs_gsc),
- HUC_LOAD_MODE_STRING(hw_uses_gsc));
+ huc_err(huc, "mismatch between FW (%s) and HW (%s) load modes\n",
+ HUC_LOAD_MODE_STRING(fw_needs_gsc), HUC_LOAD_MODE_STRING(hw_uses_gsc));
return -ENOEXEC;
}
/* make sure we can access the GSC via the mei driver if we need it */
if (!(IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC)) &&
fw_needs_gsc) {
- drm_info(&gt->i915->drm,
- "Can't load HuC due to missing MEI modules\n");
+ huc_info(huc, "can't load due to missing MEI modules\n");
return -EIO;
}
- drm_dbg(&gt->i915->drm, "GSC loads huc=%s\n", str_yes_no(fw_needs_gsc));
+ huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(fw_needs_gsc));
return 0;
}
int intel_huc_init(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
int err;
err = check_huc_loading_mode(huc);
@@ -345,7 +347,7 @@ int intel_huc_init(struct intel_huc *huc)
out:
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
- drm_info(&i915->drm, "HuC init failed with %d\n", err);
+ huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
return err;
}
@@ -389,13 +391,13 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc)
delayed_huc_load_complete(huc);
if (ret) {
- drm_err(&gt->i915->drm, "HuC: Firmware not verified %d\n", ret);
+ huc_err(huc, "firmware not verified %pe\n", ERR_PTR(ret));
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
- drm_info(&gt->i915->drm, "HuC authenticated\n");
+ huc_info(huc, "authenticated!\n");
return 0;
}
@@ -430,7 +432,7 @@ int intel_huc_auth(struct intel_huc *huc)
ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
if (ret) {
- DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
+ huc_err(huc, "authentication by GuC failed %pe\n", ERR_PTR(ret));
goto fail;
}
@@ -442,7 +444,7 @@ int intel_huc_auth(struct intel_huc *huc)
return 0;
fail:
- i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
+ huc_probe_error(huc, "authentication failed %pe\n", ERR_PTR(ret));
return ret;
}
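/*
 * A pattern repeated throughout this patch: integer error codes are logged
 * via %pe with ERR_PTR() instead of %d, so a kernel built with
 * CONFIG_SYMBOLIC_ERRNAME=y prints "-EIO" rather than "-5". Usage sketch
 * (rsa_offset is a placeholder for the GGTT offset computed above):
 */
int ret = intel_guc_auth_huc(guc, rsa_offset);

if (ret)
	huc_err(huc, "authentication by GuC failed %pe\n", ERR_PTR(ret));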
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index de7f987cf611..4ccb4be4c9cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -83,15 +83,15 @@ static int __intel_uc_reset_hw(struct intel_uc *uc)
static void __confirm_options(struct intel_uc *uc)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct drm_i915_private *i915 = gt->i915;
- drm_dbg(&i915->drm,
- "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
- i915->params.enable_guc,
- str_yes_no(intel_uc_wants_guc(uc)),
- str_yes_no(intel_uc_wants_guc_submission(uc)),
- str_yes_no(intel_uc_wants_huc(uc)),
- str_yes_no(intel_uc_wants_guc_slpc(uc)));
+ gt_dbg(gt, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
+ i915->params.enable_guc,
+ str_yes_no(intel_uc_wants_guc(uc)),
+ str_yes_no(intel_uc_wants_guc_submission(uc)),
+ str_yes_no(intel_uc_wants_huc(uc)),
+ str_yes_no(intel_uc_wants_guc_slpc(uc)));
if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
@@ -102,26 +102,22 @@ static void __confirm_options(struct intel_uc *uc)
}
if (!intel_uc_supports_guc(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "GuC is not supported!");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "GuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "HuC is not supported!");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "HuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "GuC submission is N/A");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "GuC submission is N/A");
if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
- drm_info(&i915->drm,
- "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "undocumented flag");
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ i915->params.enable_guc, "undocumented flag");
}
void intel_uc_init_early(struct intel_uc *uc)
@@ -143,6 +139,7 @@ void intel_uc_init_early(struct intel_uc *uc)
void intel_uc_init_late(struct intel_uc *uc)
{
intel_guc_init_late(&uc->guc);
+ intel_gsc_uc_load_start(&uc->gsc);
}
void intel_uc_driver_late_release(struct intel_uc *uc)
@@ -535,8 +532,11 @@ static int __uc_init_hw(struct intel_uc *uc)
else
intel_huc_auth(huc);
- if (intel_uc_uses_guc_submission(uc))
- intel_guc_submission_enable(guc);
+ if (intel_uc_uses_guc_submission(uc)) {
+ ret = intel_guc_submission_enable(guc);
+ if (ret)
+ goto err_log_capture;
+ }
if (intel_uc_uses_guc_slpc(uc)) {
ret = intel_guc_slpc_enable(&guc->slpc);
@@ -547,12 +547,8 @@ static int __uc_init_hw(struct intel_uc *uc)
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
- intel_gsc_uc_load_start(&uc->gsc);
-
- gt_info(gt, "GuC submission %s\n",
- str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
- gt_info(gt, "GuC SLPC %s\n",
- str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
+ guc_info(guc, "submission %s\n", str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
+ guc_info(guc, "SLPC %s\n", str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
return 0;
@@ -678,7 +674,7 @@ void intel_uc_suspend(struct intel_uc *uc)
int err;
/* flush the GSC worker */
- intel_gsc_uc_suspend(&uc->gsc);
+ intel_gsc_uc_flush_work(&uc->gsc);
if (!intel_guc_is_ready(guc)) {
guc->interrupts.enabled = false;
@@ -720,6 +716,8 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
return err;
}
+ intel_gsc_uc_resume(&uc->gsc);
+
return 0;
}
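/*
 * On suspend the GSC worker now only needs to be quiescent, not torn down,
 * hence intel_gsc_uc_suspend() becomes intel_gsc_uc_flush_work(). The
 * distinction, assuming the worker is a plain work_struct:
 */
#include <linux/workqueue.h>

static void quiesce_worker(struct work_struct *w)
{
	flush_work(w);		/* wait for it to finish; it may run again later */
}

static void kill_worker(struct work_struct *w)
{
	cancel_work_sync(w);	/* wait for it AND cancel a pending run */
}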
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 65672ff82605..264c952f777b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,6 +11,7 @@
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt_print.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
@@ -44,11 +45,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status)
{
uc_fw->__status = status;
- drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
- "%s firmware -> %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- status == INTEL_UC_FIRMWARE_SELECTED ?
- uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
}
#endif
@@ -562,15 +562,14 @@ static int check_ccs_header(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
- struct drm_i915_private *i915 = gt->i915;
struct uc_css_header *css;
size_t size;
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, sizeof(struct uc_css_header));
+ gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, sizeof(struct uc_css_header));
return -ENODATA;
}
@@ -580,10 +579,9 @@ static int check_ccs_header(struct intel_gt *gt,
size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
css->exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
- drm_warn(&i915->drm,
- "%s firmware %s: unexpected header size: %zu != %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, sizeof(struct uc_css_header));
+ gt_warn(gt, "%s firmware %s: unexpected header size: %zu != %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ size, sizeof(struct uc_css_header));
return -EPROTO;
}
@@ -596,18 +594,18 @@ static int check_ccs_header(struct intel_gt *gt,
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw->size < size)) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size, size);
+ gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size, size);
return -ENOEXEC;
}
/* Sanity check whether this fw is not larger than whole WOPCM memory */
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= gt->wopcm.size)) {
- drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- size, (size_t)gt->wopcm.size);
+ gt_warn(gt, "%s firmware %s: invalid size: %zu > %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ size, (size_t)gt->wopcm.size);
return -E2BIG;
}
@@ -635,20 +633,20 @@ static bool guc_check_version_range(struct intel_uc_fw *uc_fw)
*/
if (!is_ver_8bit(&uc_fw->file_selected.ver)) {
- drm_warn(&__uc_fw_to_gt(uc_fw)->i915->drm, "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->file_selected.ver.major,
- uc_fw->file_selected.ver.minor,
- uc_fw->file_selected.ver.patch);
+ gt_warn(__uc_fw_to_gt(uc_fw), "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_selected.ver.major,
+ uc_fw->file_selected.ver.minor,
+ uc_fw->file_selected.ver.patch);
return false;
}
if (!is_ver_8bit(&guc->submission_version)) {
- drm_warn(&__uc_fw_to_gt(uc_fw)->i915->drm, "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
- intel_uc_fw_type_repr(uc_fw->type),
- guc->submission_version.major,
- guc->submission_version.minor,
- guc->submission_version.patch);
+ gt_warn(__uc_fw_to_gt(uc_fw), "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ guc->submission_version.major,
+ guc->submission_version.minor,
+ guc->submission_version.patch);
return false;
}
@@ -687,10 +685,9 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
return err;
if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
- drm_err(&gt->i915->drm,
- "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
+ gt_err(gt, "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
/* try to find another blob to load */
release_firmware(*fw);
@@ -768,10 +765,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->file_wanted.ver.major && uc_fw->file_selected.ver.major) {
/* Check the file's major version was as it claimed */
if (uc_fw->file_selected.ver.major != uc_fw->file_wanted.ver.major) {
- drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
- uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
+ gt_notice(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
if (!intel_uc_fw_is_overridden(uc_fw)) {
err = -ENOEXEC;
goto fail;
@@ -786,16 +783,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* Preserve the version that was really wanted */
memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
- drm_notice(&i915->drm,
- "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->file_wanted.path,
- uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor,
- uc_fw->file_selected.path,
- uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor);
- drm_info(&i915->drm,
- "Consider updating your linux-firmware pkg or downloading from %s\n",
- INTEL_UC_FIRMWARE_URL);
+ gt_notice(gt, "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_wanted.path,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor,
+ uc_fw->file_selected.path,
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor);
+ gt_info(gt, "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
}
if (HAS_LMEM(i915)) {
@@ -823,10 +818,10 @@ fail:
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
- i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, err);
- drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
- intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
+ gt_probe_error(gt, "%s firmware %s: fetch failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
+ gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
return err;
@@ -932,9 +927,9 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
if (ret)
- drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uncore_read_fw(uncore, DMA_CTRL));
+ gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uncore_read_fw(uncore, DMA_CTRL));
/* Disable the bits once DMA is over */
intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
@@ -950,9 +945,8 @@ int intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err)
GEM_BUG_ON(!intel_uc_fw_is_loadable(uc_fw));
- i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- err);
+ gt_probe_error(gt, "Failed to load %s firmware %s %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return err;
@@ -1078,15 +1072,15 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
if (err) {
- DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw pin-pages failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
goto out;
}
err = uc_fw_rsa_data_create(uc_fw);
if (err) {
- DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
+ gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw rsa data creation failed %pe\n",
+ intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
goto out_unpin;
}
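/*
 * check_ccs_header() keeps three ordered size gates, now logged via
 * gt_warn(). Condensed into a standalone sketch (the sizes are plain
 * parameters here; the real code derives them from struct uc_css_header):
 */
#include <linux/errno.h>
#include <linux/types.h>

static int check_fw_sizes(size_t blob, size_t hdr, size_t computed_hdr,
			  size_t ucode, size_t rsa)
{
	if (blob < hdr)			/* blob too small for its own header */
		return -ENODATA;
	if (computed_hdr != hdr)	/* header fields disagree with layout */
		return -EPROTO;
	if (blob < hdr + ucode + rsa)	/* too small for header + payload */
		return -ENOEXEC;
	return 0;
}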
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index e28518fe8b90..1fd760539f77 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
+#include "intel_guc_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -65,7 +67,7 @@ static int intel_guc_scrub_ctbs(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- drm_err(&gt->i915->drm, "Failed to create context, %d: %d\n", i, ret);
+ gt_err(gt, "Failed to create context %d: %pe\n", i, ce);
goto err;
}
@@ -86,7 +88,7 @@ static int intel_guc_scrub_ctbs(void *arg)
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n", i, ret);
+ gt_err(gt, "Failed to create request %d: %pe\n", i, rq);
goto err;
}
@@ -96,7 +98,7 @@ static int intel_guc_scrub_ctbs(void *arg)
for (i = 0; i < 3; ++i) {
ret = i915_request_wait(last[i], 0, HZ);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
+ gt_err(gt, "Last request failed to complete: %pe\n", ERR_PTR(ret));
goto err;
}
i915_request_put(last[i]);
@@ -113,7 +115,7 @@ static int intel_guc_scrub_ctbs(void *arg)
/* GT will not idle if G2H are lost */
ret = intel_gt_wait_for_idle(gt, HZ);
if (ret < 0) {
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));
goto err;
}
@@ -153,7 +155,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
if (!ce) {
- drm_err(&gt->i915->drm, "Context array allocation failed\n");
+ guc_err(guc, "Context array allocation failed\n");
return -ENOMEM;
}
@@ -166,25 +168,25 @@ static int intel_guc_steal_guc_ids(void *arg)
ce[context_index] = intel_context_create(engine);
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index]);
+ guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
ce[context_index] = NULL;
- drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
goto err_wakeref;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ guc_err(guc, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err_contexts;
}
spin_rq = igt_spinner_create_request(&spin, ce[context_index],
MI_ARB_CHECK);
if (IS_ERR(spin_rq)) {
ret = PTR_ERR(spin_rq);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ guc_err(guc, "Failed to create spinner request: %pe\n", spin_rq);
goto err_contexts;
}
ret = request_add_spin(spin_rq, &spin);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ guc_err(guc, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
@@ -192,9 +194,9 @@ static int intel_guc_steal_guc_ids(void *arg)
while (ret != -EAGAIN) {
ce[++context_index] = intel_context_create(engine);
if (IS_ERR(ce[context_index])) {
- ret = PTR_ERR(ce[context_index--]);
- ce[context_index] = NULL;
- drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
+ ret = PTR_ERR(ce[context_index]);
+ guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
+ ce[context_index--] = NULL;
goto err_spin_rq;
}
@@ -203,8 +205,8 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = PTR_ERR(rq);
rq = NULL;
if (ret != -EAGAIN) {
- drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n",
- context_index, ret);
+ guc_err(guc, "Failed to create request %d: %pe\n",
+ context_index, ERR_PTR(ret));
goto err_spin_rq;
}
} else {
@@ -218,7 +220,7 @@ static int intel_guc_steal_guc_ids(void *arg)
igt_spinner_end(&spin);
ret = intel_selftest_wait_for_rq(spin_rq);
if (ret) {
- drm_err(&gt->i915->drm, "Spin request failed to complete: %d\n", ret);
+ guc_err(guc, "Spin request failed to complete: %pe\n", ERR_PTR(ret));
i915_request_put(last);
goto err_spin_rq;
}
@@ -230,7 +232,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(last, 0, HZ * 30);
i915_request_put(last);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
+ guc_err(guc, "Last request failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
@@ -238,7 +240,7 @@ static int intel_guc_steal_guc_ids(void *arg)
rq = nop_user_request(ce[context_index], NULL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to steal guc_id, %d: %d\n", context_index, ret);
+ guc_err(guc, "Failed to steal guc_id %d: %pe\n", context_index, rq);
goto err_spin_rq;
}
@@ -246,20 +248,20 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(rq, 0, HZ);
i915_request_put(rq);
if (ret < 0) {
- drm_err(&gt->i915->drm, "Request with stolen guc_id failed to complete: %d\n", ret);
+ guc_err(guc, "Request with stolen guc_id failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Wait for idle */
ret = intel_gt_wait_for_idle(gt, HZ * 30);
if (ret < 0) {
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ guc_err(guc, "GT failed to idle: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Verify a guc_id was stolen */
if (guc->number_guc_id_stolen == number_guc_id_stolen) {
- drm_err(&gt->i915->drm, "No guc_id was stolen");
+ guc_err(guc, "No guc_id was stolen");
ret = -EINVAL;
} else {
ret = 0;
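/*
 * The reordering in the steal-guc_ids error path above is a real fix, not
 * churn: PTR_ERR(ce[context_index--]) decremented the index before the
 * NULL store, so the ERR_PTR was left in the array and a valid neighbour
 * was wiped instead, breaking the later cleanup loop. The corrected order:
 */
ret = PTR_ERR(ce[context_index]);	/* record the error */
guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
ce[context_index--] = NULL;		/* clear that slot, then back up */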
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index d91b58f70403..34b5d952e2bc 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -3,6 +3,7 @@
* Copyright © 2022 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_reset.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -45,7 +46,7 @@ static int intel_hang_guc(void *arg)
ctx = kernel_context(gt->i915, NULL);
if (IS_ERR(ctx)) {
- drm_err(&gt->i915->drm, "Failed get kernel context: %ld\n", PTR_ERR(ctx));
+ gt_err(gt, "Failed get kernel context: %pe\n", ctx);
return PTR_ERR(ctx);
}
@@ -54,7 +55,7 @@ static int intel_hang_guc(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ gt_err(gt, "Failed to create spinner request: %pe\n", ce);
goto err;
}
@@ -63,13 +64,13 @@ static int intel_hang_guc(void *arg)
old_beat = engine->props.heartbeat_interval_ms;
ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to boost heatbeat interval: %d\n", ret);
+ gt_err(gt, "Failed to boost heatbeat interval: %pe\n", ERR_PTR(ret));
goto err;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err;
}
@@ -77,28 +78,28 @@ static int intel_hang_guc(void *arg)
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ gt_err(gt, "Failed to create spinner request: %pe\n", rq);
goto err_spin;
}
ret = request_add_spin(rq, &spin);
if (ret) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ gt_err(gt, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
goto err_spin;
}
ret = intel_reset_guc(gt);
if (ret) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "Failed to reset GuC, ret = %d\n", ret);
+ gt_err(gt, "Failed to reset GuC: %pe\n", ERR_PTR(ret));
goto err_spin;
}
guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
if (!(guc_status & GS_MIA_IN_RESET)) {
i915_request_put(rq);
- drm_err(&gt->i915->drm, "GuC failed to reset: status = 0x%08X\n", guc_status);
+ gt_err(gt, "Failed to reset GuC: status = 0x%08X\n", guc_status);
ret = -EIO;
goto err_spin;
}
@@ -107,12 +108,12 @@ static int intel_hang_guc(void *arg)
ret = intel_selftest_wait_for_rq(rq);
i915_request_put(rq);
if (ret) {
- drm_err(&gt->i915->drm, "Request failed to complete: %d\n", ret);
+ gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin;
}
if (i915_reset_count(global) == reset_count) {
- drm_err(&gt->i915->drm, "Failed to record a GPU reset\n");
+ gt_err(gt, "Failed to record a GPU reset\n");
ret = -EINVAL;
goto err_spin;
}
@@ -132,7 +133,7 @@ err_spin:
ret = intel_selftest_wait_for_rq(rq);
i915_request_put(rq);
if (ret) {
- drm_err(&gt->i915->drm, "No-op failed to complete: %d\n", ret);
+ gt_err(gt, "No-op failed to complete: %pe\n", ERR_PTR(ret));
goto err;
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index d17982c36d25..a40e7c32e613 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gt/intel_gt_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_reset.h"
#include "selftests/intel_scheduler_helpers.h"
@@ -115,30 +116,30 @@ static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
parent = multi_lrc_create_parent(gt, class, 0);
if (IS_ERR(parent)) {
- drm_err(&gt->i915->drm, "Failed creating contexts: %ld", PTR_ERR(parent));
+ gt_err(gt, "Failed creating contexts: %pe\n", parent);
return PTR_ERR(parent);
} else if (!parent) {
- drm_dbg(&gt->i915->drm, "Not enough engines in class: %d", class);
+ gt_dbg(gt, "Not enough engines in class: %d\n", class);
return 0;
}
rq = multi_lrc_nop_request(parent);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- drm_err(&gt->i915->drm, "Failed creating requests: %d", ret);
+ gt_err(gt, "Failed creating requests: %pe\n", rq);
goto out;
}
ret = intel_selftest_wait_for_rq(rq);
if (ret)
- drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret);
+ gt_err(gt, "Failed waiting on request: %pe\n", ERR_PTR(ret));
i915_request_put(rq);
if (ret >= 0) {
ret = intel_gt_wait_for_idle(gt, HZ * 5);
if (ret < 0)
- drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
+ gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));
}
out:
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 4d898b14de93..e0c5dfb788eb 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -63,7 +63,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ if (!(vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_EDP)) & TRANSCONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -79,7 +79,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
- if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ if (vgpu_vreg_t(vgpu, TRANSCONF(pipe)) & TRANSCONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
@@ -187,8 +187,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C));
for_each_pipe(dev_priv, pipe) {
- vgpu_vreg_t(vgpu, PIPECONF(pipe)) &=
- ~(PIPECONF_ENABLE | PIPECONF_STATE_ENABLE);
+ vgpu_vreg_t(vgpu, TRANSCONF(pipe)) &=
+ ~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE);
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK;
@@ -248,8 +248,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* TRANSCODER_A can be enabled. PORT_x depends on the input of
* setup_virtual_dp_monitor.
*/
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_STATE_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;
/*
* Golden M/N are calculated based on:
@@ -506,7 +506,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
}
- vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -584,7 +584,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
* @turnon: Turn ON/OFF vblank_timer
*
* This function is used to turn on/off or update the per-vGPU vblank_timer
- * when PIPECONF is enabled or disabled. vblank_timer period is also updated
+ * when TRANSCONF is enabled or disabled. vblank_timer period is also updated
* if guest changed the refresh rate.
*
*/
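/*
 * The PIPECONF -> TRANSCONF rename above is mechanical: TRANSCONF() is
 * indexed by enum transcoder, and TRANSCODER_A..C share the numeric values
 * of PIPE_A..C, which is why pipe_is_enabled() can still pass a pipe index
 * straight through. Equivalent before/after:
 */
vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE;	/* was PIPECONF(PIPE_A) |= PIPECONF_ENABLE */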
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 735fc83e7026..3c8e0d198c4f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -666,8 +666,8 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
/* Get H/V total from transcoder timing */
- htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
- vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
+ htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
+ vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
if (dp_br && link_n && htotal && vtotal) {
u64 pixel_clk = 0;
@@ -697,12 +697,12 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (data & PIPECONF_ENABLE) {
- vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE;
+ if (data & TRANSCONF_ENABLE) {
+ vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE;
vgpu_update_refresh_rate(vgpu);
vgpu_update_vblank_emulation(vgpu, true);
} else {
- vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE;
+ vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE;
vgpu_update_vblank_emulation(vgpu, false);
}
return 0;
@@ -2262,10 +2262,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
- MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write);
+ MMIO_DH(TRANSCONF(TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 7412abf166a8..8ef93889061a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -92,8 +92,7 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
lockdep_assert_held(&ref->tree_lock);
- if (!atomic_read(&ref->count)) /* before the first inc */
- debug_object_activate(ref, &active_debug_desc);
+ debug_object_activate(ref, &active_debug_desc);
}
static void debug_active_deactivate(struct i915_active *ref)
@@ -422,12 +421,12 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
* we can use it to substitute for the pending idle-barrier
* request that we want to emit on the kernel_context.
*/
- __active_del_barrier(ref, node_from_active(active));
- return true;
+ return __active_del_barrier(ref, node_from_active(active));
}
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
+ u64 idx = i915_request_timeline(rq)->fence_context;
struct dma_fence *fence = &rq->fence;
struct i915_active_fence *active;
int err;
@@ -437,16 +436,19 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
if (err)
return err;
- active = active_instance(ref, i915_request_timeline(rq)->fence_context);
- if (!active) {
- err = -ENOMEM;
- goto out;
- }
+ do {
+ active = active_instance(ref, idx);
+ if (!active) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (replace_barrier(ref, active)) {
+ RCU_INIT_POINTER(active->fence, NULL);
+ atomic_dec(&ref->count);
+ }
+ } while (unlikely(is_barrier(active)));
- if (replace_barrier(ref, active)) {
- RCU_INIT_POINTER(active->fence, NULL);
- atomic_dec(&ref->count);
- }
if (!__i915_active_fence_set(active, fence))
__i915_active_acquire(ref);
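/*
 * Why i915_active_add_request() gained a loop: __active_del_barrier() can
 * fail when the idle barrier is being consumed concurrently, so
 * replace_barrier() now reports that and the slot must be re-looked-up
 * until it is no longer a barrier. Generic shape of the retry, with
 * hypothetical helpers standing in for the i915 internals:
 */
do {
	slot = lookup_or_create(tree, key);	/* hypothetical */
	if (!slot)
		return -ENOMEM;

	if (steal_barrier(tree, slot))		/* may race and fail */
		drop_barrier_ref(tree, slot);
} while (is_barrier(slot));			/* lost the race: retry */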
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 45773ce1deac..16011c0286ad 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -52,7 +52,6 @@
#include "i915_irq.h"
#include "i915_scheduler.h"
#include "intel_mchbar_regs.h"
-#include "intel_pm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index cf1c0970ecb4..da249337c23b 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -167,6 +167,8 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
+ pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
+ pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
if (pre) {
drm_err(&dev_priv->drm, "This is a pre-production stepping. "
@@ -248,10 +250,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
- intel_pm_setup(dev_priv);
- ret = intel_power_domains_init(dev_priv);
- if (ret < 0)
- goto err_gem;
intel_irq_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
@@ -260,10 +258,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
return 0;
-err_gem:
- i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release_all(dev_priv);
- i915_drm_clients_fini(&dev_priv->clients);
err_rootgt:
intel_region_ttm_device_fini(dev_priv);
err_ttm:
@@ -489,13 +483,17 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
- ret = intel_memory_regions_hw_probe(dev_priv);
+ /*
+ * Make sure we probe lmem before we probe stolen-lmem. The BAR size
+ * might be different due to BAR resizing.
+ */
+ ret = intel_gt_tiles_init(dev_priv);
if (ret)
goto err_ggtt;
- ret = intel_gt_tiles_init(dev_priv);
+ ret = intel_memory_regions_hw_probe(dev_priv);
if (ret)
- goto err_mem_regions;
+ goto err_ggtt;
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
@@ -537,7 +535,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
ret = i915_pcode_init(dev_priv);
if (ret)
- goto err_msi;
+ goto err_opregion;
/*
* Fill the dram structure to get the system dram info. This will be
@@ -558,6 +556,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
return 0;
+err_opregion:
+ intel_opregion_cleanup(dev_priv);
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -583,6 +583,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
i915_perf_fini(dev_priv);
+ intel_opregion_cleanup(dev_priv);
+
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@@ -936,7 +938,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
*/
static void i915_driver_lastclose(struct drm_device *dev)
{
- intel_fbdev_restore_mode(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ intel_fbdev_restore_mode(i915);
vga_switcheroo_process_delayed_switch();
}
@@ -1002,7 +1006,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
- intel_dmc_ucode_suspend(i915);
+ intel_dmc_suspend(i915);
i915_gem_suspend(i915);
@@ -1032,6 +1036,13 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
return false;
}
+static void i915_drm_complete(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ intel_pxp_resume_complete(i915->pxp);
+}
+
static int i915_drm_prepare(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
@@ -1072,8 +1083,6 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_encoders(dev_priv);
- intel_suspend_hw(dev_priv);
-
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(dev_priv);
i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
@@ -1087,7 +1096,7 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->suspend_count++;
- intel_dmc_ucode_suspend(dev_priv);
+ intel_dmc_suspend(dev_priv);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1208,7 +1217,7 @@ static int i915_drm_resume(struct drm_device *dev)
/* Must be called after GGTT is resumed. */
intel_dpt_resume(dev_priv);
- intel_dmc_ucode_resume(dev_priv);
+ intel_dmc_resume(dev_priv);
i915_restore_display(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
@@ -1232,8 +1241,6 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
- intel_pxp_resume(dev_priv->pxp);
-
intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
intel_hpd_init(dev_priv);
@@ -1425,6 +1432,16 @@ static int i915_pm_resume(struct device *kdev)
return i915_drm_resume(&i915->drm);
}
+static void i915_pm_complete(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return;
+
+ i915_drm_complete(&i915->drm);
+}
+
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
@@ -1645,6 +1662,7 @@ const struct dev_pm_ops i915_pm_ops = {
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
+ .complete = i915_pm_complete,
/*
* S4 event handlers
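/*
 * PXP resume moves into the PM "complete" phase, which the PM core enters
 * only after the resume phase has finished for all devices. Minimal shape
 * of such a hook, with hypothetical callbacks:
 */
#include <linux/pm.h>

static int example_resume(struct device *dev) { return 0; }
static void example_complete(struct device *dev) { }

static const struct dev_pm_ops example_pm_ops = {
	.resume = example_resume,	/* per-device resume */
	.complete = example_complete,	/* after every device has resumed */
};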
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4295306487c7..6254aa977398 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -580,6 +580,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
#define IS_ADLP_RPLP(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
+#define IS_ADLP_RPLU(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -653,22 +655,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_TIGERLAKE(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
-#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
- (IS_TGL_UY(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
-#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
- (IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915)) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
#define IS_RKL_DISPLAY_STEP(p, since, until) \
(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
-#define IS_DG1_GRAPHICS_STEP(p, since, until) \
- (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_DG1_DISPLAY_STEP(p, since, until) \
- (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))
-
#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
(IS_ALDERLAKE_S(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
@@ -876,7 +865,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
*/
#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
-#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_SAGV(dev_priv) (DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv))
#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 904f21e1380c..f020c0086fbc 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -505,6 +505,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
ctx->guilty, ctx->active,
ctx->total_runtime, ctx->avg_runtime);
+ err_printf(m, " context timeline seqno %u\n", ctx->hwsp_seqno);
}
static struct i915_vma_coredump *
@@ -1395,6 +1396,8 @@ static bool record_context(struct i915_gem_context_coredump *e,
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
+ e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
+ *ce->timeline->hwsp_seqno : ~0U;
e->total_runtime = intel_context_get_total_runtime_ns(ce);
e->avg_runtime = intel_context_get_avg_runtime_ns(ce);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 56027ffbce51..a91932cc6531 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -107,6 +107,7 @@ struct intel_engine_coredump {
int active;
int guilty;
struct i915_sched_attr sched_attr;
+ u32 hwsp_seqno;
} context;
struct i915_vma_coredump *vma;
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 1225bc432f0d..596dd2c07010 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -99,20 +99,6 @@ hwm_field_read_and_scale(struct hwm_drvdata *ddat, i915_reg_t rgadr,
return mul_u64_u32_shr(reg_value, scale_factor, nshift);
}
-static void
-hwm_field_scale_and_write(struct hwm_drvdata *ddat, i915_reg_t rgadr,
- int nshift, unsigned int scale_factor, long lval)
-{
- u32 nval;
-
- /* Computation in 64-bits to avoid overflow. Round to nearest. */
- nval = DIV_ROUND_CLOSEST_ULL((u64)lval << nshift, scale_factor);
-
- hwm_locked_with_pm_intel_uncore_rmw(ddat, rgadr,
- PKG_PWR_LIM_1,
- REG_FIELD_PREP(PKG_PWR_LIM_1, nval));
-}
-
/*
* hwm_energy - Obtain energy value
*
@@ -232,11 +218,15 @@ hwm_power1_max_interval_store(struct device *dev,
/* val in hw units */
val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
/* Convert to 1.x * power(2,y) */
- if (!val)
- return -EINVAL;
- y = ilog2(val);
- /* x = (val - (1 << y)) >> (y - 2); */
- x = (val - (1ul << y)) << x_w >> y;
+ if (!val) {
+ /* Avoid ilog2(0) */
+ y = 0;
+ x = 0;
+ } else {
+ y = ilog2(val);
+ /* x = (val - (1 << y)) >> (y - 2); */
+ x = (val - (1ul << y)) << x_w >> y;
+ }
rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
@@ -392,6 +382,22 @@ hwm_power_max_read(struct hwm_drvdata *ddat, long *val)
}
static int
+hwm_power_max_write(struct hwm_drvdata *ddat, long val)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+ u32 nval;
+
+ /* Computation in 64-bits to avoid overflow. Round to nearest. */
+ nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER);
+ nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
+
+ hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
+ PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1,
+ nval);
+ return 0;
+}
+
+static int
hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
{
struct i915_hwmon *hwmon = ddat->hwmon;
@@ -425,16 +431,11 @@ hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
static int
hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
{
- struct i915_hwmon *hwmon = ddat->hwmon;
u32 uval;
switch (attr) {
case hwmon_power_max:
- hwm_field_scale_and_write(ddat,
- hwmon->rg.pkg_rapl_limit,
- hwmon->scl_shift_power,
- SF_POWER, val);
- return 0;
+ return hwm_power_max_write(ddat, val);
case hwmon_power_crit:
uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
return hwm_pcode_write_i1(ddat->uncore->i915, uval);
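hwm_power_max_write() folds the old scale-and-write helper into one place: scale the user value into hardware units with a rounding division, then pack it together with the enable bit into a single rmw. A sketch of that scale-and-pack step with simplified stand-ins for DIV_ROUND_CLOSEST_ULL() and REG_FIELD_PREP(); the mask, enable bit and shift below are illustrative, not the real register layout:

#include <stdint.h>
#include <stdio.h>

#define SF_POWER	1000000u	/* driver scales power in uW */
#define LIM_MASK	0x7fffu		/* illustrative limit field */
#define LIM_EN		(1u << 15)	/* illustrative enable bit */

static uint64_t div_round_closest_u64(uint64_t n, uint32_t d)
{
	return (n + d / 2) / d;
}

/* Pack a value into a masked field (REG_FIELD_PREP equivalent). */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t encode_power_limit(long val_uw, unsigned int scl_shift)
{
	uint32_t nval;

	/* Computation in 64 bits to avoid overflow; round to nearest. */
	nval = (uint32_t)div_round_closest_u64((uint64_t)val_uw << scl_shift,
					       SF_POWER);
	return LIM_EN | field_prep(LIM_MASK, nval);
}

int main(void)
{
	/* 15 W with a 1/8 W hardware unit (shift 3) -> 120 units */
	printf("0x%08x\n", encode_power_limit(15000000, 3));
	return 0;
}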
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 240d5e198904..31271c30a8cf 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -52,7 +52,6 @@
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "intel_pm.h"
/**
* DOC: interrupt handling
@@ -81,8 +80,7 @@ static inline void pmu_irq_stats(struct drm_i915_private *i915,
}
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
-typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
- enum hpd_pin pin);
+typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder);
static const u32 hpd_ilk[HPD_NUM_PINS] = {
[HPD_PORT_A] = DE_DP_A_HOTPLUG,
@@ -199,6 +197,8 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
hpd->hpd = hpd_gen11;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
hpd->hpd = hpd_bxt;
+ else if (DISPLAY_VER(dev_priv) == 9)
+ hpd->hpd = NULL; /* no north HPD on SKL */
else if (DISPLAY_VER(dev_priv) >= 8)
hpd->hpd = hpd_bdw;
else if (DISPLAY_VER(dev_priv) >= 7)
@@ -884,7 +884,7 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
u32 hotplug = 0;
for_each_intel_encoder(&i915->drm, encoder)
- hotplug |= hotplug_enables(i915, encoder->hpd_pin);
+ hotplug |= hotplug_enables(encoder);
return hotplug;
}
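The callback signature change above hands the whole encoder to each hotplug_enables_func instead of just (i915, hpd_pin), which is what lets bxt_hotplug_enables() consult per-encoder VBT data further down. A sketch of the aggregation pattern with illustrative types standing in for the driver's:

#include <stdint.h>
#include <stdio.h>

struct enc {
	int hpd_pin;
	int hpd_inverted;	/* per-encoder VBT-derived data */
};

typedef uint32_t (*hotplug_enables_func)(const struct enc *enc);

static uint32_t demo_enables(const struct enc *enc)
{
	uint32_t bits = 1u << enc->hpd_pin;

	if (enc->hpd_inverted)	/* only reachable via the encoder */
		bits |= 1u << 31;
	return bits;
}

/* OR together the contribution of every encoder. */
static uint32_t gather(const struct enc *encs, int n,
		       hotplug_enables_func cb)
{
	uint32_t hotplug = 0;
	int i;

	for (i = 0; i < n; i++)
		hotplug |= cb(&encs[i]);
	return hotplug;
}

int main(void)
{
	struct enc encs[] = { { 0, 0 }, { 2, 1 } };

	printf("0x%08x\n", gather(encs, 2, demo_enables));
	return 0;
}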
@@ -2835,10 +2835,11 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
/*
* When CPU and PCH are on the same package, port A
@@ -2890,31 +2891,29 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
-static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
case HPD_PORT_B:
case HPD_PORT_C:
case HPD_PORT_D:
- return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
+ return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin);
default:
return 0;
}
}
-static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_TC1:
case HPD_PORT_TC2:
case HPD_PORT_TC3:
case HPD_PORT_TC4:
case HPD_PORT_TC5:
case HPD_PORT_TC6:
- return ICP_TC_HPD_ENABLE(pin);
+ return ICP_TC_HPD_ENABLE(encoder->hpd_pin);
default:
return 0;
}
@@ -2958,17 +2957,16 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
icp_tc_hpd_detection_setup(dev_priv);
}
-static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_TC1:
case HPD_PORT_TC2:
case HPD_PORT_TC3:
case HPD_PORT_TC4:
case HPD_PORT_TC5:
case HPD_PORT_TC6:
- return GEN11_HOTPLUG_CTL_ENABLE(pin);
+ return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin);
default:
return 0;
}
@@ -3031,10 +3029,9 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
icp_hpd_irq_setup(dev_priv);
}
-static u32 spt_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 spt_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
return PORTA_HOTPLUG_ENABLE;
case HPD_PORT_B:
@@ -3048,10 +3045,9 @@ static u32 spt_hotplug_enables(struct drm_i915_private *i915,
}
}
-static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_E:
return PORTE_HOTPLUG_ENABLE;
default:
@@ -3094,10 +3090,9 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
spt_hpd_detection_setup(dev_priv);
}
-static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
{
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
return DIGITAL_PORTA_HOTPLUG_ENABLE |
DIGITAL_PORTA_PULSE_DURATION_2ms;
@@ -3135,25 +3130,24 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_irq_setup(dev_priv);
}
-static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
- enum hpd_pin pin)
+static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
{
u32 hotplug;
- switch (pin) {
+ switch (encoder->hpd_pin) {
case HPD_PORT_A:
hotplug = PORTA_HOTPLUG_ENABLE;
- if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
+ if (intel_bios_encoder_hpd_invert(encoder->devdata))
hotplug |= BXT_DDIA_HPD_INVERT;
return hotplug;
case HPD_PORT_B:
hotplug = PORTB_HOTPLUG_ENABLE;
- if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
+ if (intel_bios_encoder_hpd_invert(encoder->devdata))
hotplug |= BXT_DDIB_HPD_INVERT;
return hotplug;
case HPD_PORT_C:
hotplug = PORTC_HOTPLUG_ENABLE;
- if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
+ if (intel_bios_encoder_hpd_invert(encoder->devdata))
hotplug |= BXT_DDIC_HPD_INVERT;
return hotplug;
default:
@@ -3471,15 +3465,33 @@ static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~0u;
}
+static u32 i9xx_error_mask(struct drm_i915_private *i915)
+{
+ /*
+ * On gen2/3 FBC generates (seemingly spurious)
+ * display INVALID_GTT/INVALID_GTT_PTE table errors.
+ *
+ * Also gen3 bspec has this to say:
+ * "DISPA_INVALID_GTT_PTE
+ " [DevNapa] : Reserved. This bit does not reflect the page
+ " table error for the display plane A."
+ *
+ * Unfortunately we can't mask off individual PGTBL_ER bits,
+ * so we just have to mask off all page table errors via EMR.
+ */
+ if (HAS_FBC(i915))
+ return ~I915_ERROR_MEMORY_REFRESH;
+ else
+ return ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+}
+
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
- intel_uncore_write16(uncore,
- EMR,
- ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH));
+ intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
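i9xx_error_mask() centralises the EMR value that both the i8xx and i915 postinstall paths now share: on FBC-capable gen2/3 parts the (spuriously firing) page table error bits must be masked as well, since individual PGTBL_ER bits cannot be masked. A sketch with illustrative bit values (set EMR bits are masked, i.e. never raise an error interrupt):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ERR_PAGE_TABLE		(1u << 4)	/* illustrative */
#define ERR_MEMORY_REFRESH	(1u << 1)	/* illustrative */

static uint32_t i9xx_error_mask_sketch(bool has_fbc)
{
	/* FBC parts: also mask the spurious page table errors. */
	if (has_fbc)
		return ~ERR_MEMORY_REFRESH;
	else
		return ~(ERR_PAGE_TABLE | ERR_MEMORY_REFRESH);
}

int main(void)
{
	printf("fbc:    0x%08x\n", i9xx_error_mask_sketch(true));
	printf("no fbc: 0x%08x\n", i9xx_error_mask_sketch(false));
	return 0;
}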
@@ -3510,9 +3522,7 @@ static void i8xx_error_irq_ack(struct drm_i915_private *i915,
u16 emr;
*eir = intel_uncore_read16(uncore, EIR);
-
- if (*eir)
- intel_uncore_write16(uncore, EIR, *eir);
+ intel_uncore_write16(uncore, EIR, *eir);
*eir_stuck = intel_uncore_read16(uncore, EIR);
if (*eir_stuck == 0)
@@ -3541,6 +3551,9 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
if (eir_stuck)
drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
eir_stuck);
+
+ drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
+ intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
@@ -3548,7 +3561,8 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
{
u32 emr;
- *eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0);
+ *eir = intel_uncore_read(&dev_priv->uncore, EIR);
+ intel_uncore_write(&dev_priv->uncore, EIR, *eir);
*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
if (*eir_stuck == 0)
@@ -3564,7 +3578,8 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
* (or by a GPU reset) so we mask any bit that
* remains set.
*/
- emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff);
+ emr = intel_uncore_read(&dev_priv->uncore, EMR);
+ intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
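The ack path above drops intel_uncore_rmw() in favour of explicit read/write pairs: EIR is now written back unconditionally, and EMR is briefly set to all-ones before being restored with the stuck bits ORed in. A toy model of that sequence against fake registers, emulated purely for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t eir_hw = 0x0a;		/* two latched errors ... */
static const uint32_t stuck = 0x08;	/* ... one of which re-asserts */
static uint32_t emr_hw = 0x0;

static uint32_t eir_read(void) { return eir_hw; }

static void eir_write(uint32_t v)
{
	eir_hw = (eir_hw & ~v) | stuck;	/* ack, but stuck bit survives */
}

int main(void)
{
	uint32_t eir, eir_stuck, emr = 0;

	eir = eir_read();
	eir_write(eir);			/* unconditional ack */
	eir_stuck = eir_read();		/* whatever survived is stuck */
	if (eir_stuck) {
		emr = emr_hw;			/* save current mask */
		emr_hw = 0xffffffff;		/* briefly mask everything */
		emr_hw = emr | eir_stuck;	/* restore + mask stuck bits */
	}
	printf("eir 0x%02x stuck 0x%02x emr 0x%02x\n", eir, eir_stuck, emr_hw);
	return 0;
}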
@@ -3576,6 +3591,9 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
if (eir_stuck)
drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
eir_stuck);
+
+ drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
+ intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
@@ -3645,8 +3663,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
- intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH));
+ intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -3749,26 +3766,31 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~0u;
}
-static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
+static u32 i965_error_mask(struct drm_i915_private *i915)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 enable_mask;
- u32 error_mask;
-
/*
* Enable some error detection; note that the instruction error mask
* bit is reserved, so we leave it masked.
+ *
+ * i965 FBC no longer generates spurious GTT errors,
+ * so we can always enable the page table errors.
*/
- if (IS_G4X(dev_priv)) {
- error_mask = ~(GM45_ERROR_PAGE_TABLE |
- GM45_ERROR_MEM_PRIV |
- GM45_ERROR_CP_PRIV |
- I915_ERROR_MEMORY_REFRESH);
- } else {
- error_mask = ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH);
- }
- intel_uncore_write(uncore, EMR, error_mask);
+ if (IS_G4X(i915))
+ return ~(GM45_ERROR_PAGE_TABLE |
+ GM45_ERROR_MEM_PRIV |
+ GM45_ERROR_CP_PRIV |
+ I915_ERROR_MEMORY_REFRESH);
+ else
+ return ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+}
+
+static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+ u32 enable_mask;
+
+ intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 52531ab28c5f..a76c5ce9513d 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -17,7 +17,6 @@
#include "i915_drv.h"
#include "i915_pmu.h"
-#include "intel_pm.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3b2642397b82..d22ffd7a32dc 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -116,6 +116,9 @@
* #define GEN8_BAR _MMIO(0xb888)
*/
+#define GU_CNTL_PROTECTED _MMIO(0x10100C)
+#define DEPRESENT REG_BIT(9)
+
#define GU_CNTL _MMIO(0x101010)
#define LMEM_INIT REG_BIT(7)
#define DRIVERFLR REG_BIT(31)
@@ -541,9 +544,10 @@
#define _BXT_PHY0_BASE 0x6C000
#define _BXT_PHY1_BASE 0x162000
#define _BXT_PHY2_BASE 0x163000
-#define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \
- _BXT_PHY1_BASE, \
- _BXT_PHY2_BASE)
+#define BXT_PHY_BASE(phy) \
+ _PICK_EVEN_2RANGES(phy, 1, \
+ _BXT_PHY0_BASE, _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE, _BXT_PHY2_BASE)
#define _BXT_PHY(phy, reg) \
_MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
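BXT_PHY_BASE() is the first of several macros in this patch converted to _PICK_EVEN_2RANGES(): indices below the divider map into the first evenly spaced (base, step) range, the rest into the second, which also covers the degenerate step-0 case used here for PHY0. A plain-C illustration of the arithmetic (not the kernel's exact definition):

#include <stdint.h>
#include <stdio.h>

/* First range: a0 with step (a1 - a0); second: b0 with step (b1 - b0). */
#define PICK_EVEN_2RANGES(i, divider, a0, a1, b0, b1)		\
	((i) < (divider) ?					\
	 (a0) + (uint32_t)(i) * ((a1) - (a0)) :			\
	 (b0) + (uint32_t)((i) - (divider)) * ((b1) - (b0)))

int main(void)
{
	/* BXT PHY bases: phy 0 -> 0x6C000, phy 1/2 -> 0x162000/0x163000 */
	unsigned int phy;

	for (phy = 0; phy < 3; phy++)
		printf("phy%u base 0x%x\n", phy,
		       PICK_EVEN_2RANGES(phy, 1, 0x6C000u, 0x6C000u,
					 0x162000u, 0x163000u));
	return 0;
}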
@@ -566,13 +570,14 @@
#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
_BXT_PHY_CTL_DDI_B)
-#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
+#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP, \
- _PHY_CTL_FAMILY_DDI_C)
+#define BXT_PHY_CTL_FAMILY(phy) \
+ _MMIO(_PICK_EVEN_2RANGES(phy, 1, \
+ _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C))
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@@ -1038,9 +1043,11 @@
#define _MBUS_ABOX0_CTL 0x45038
#define _MBUS_ABOX1_CTL 0x45048
#define _MBUS_ABOX2_CTL 0x4504C
-#define MBUS_ABOX_CTL(x) _MMIO(_PICK(x, _MBUS_ABOX0_CTL, \
- _MBUS_ABOX1_CTL, \
- _MBUS_ABOX2_CTL))
+#define MBUS_ABOX_CTL(x) \
+ _MMIO(_PICK_EVEN_2RANGES(x, 2, \
+ _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \
+ _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL))
+
#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
@@ -1730,10 +1737,11 @@
#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6)
#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2)
#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0)
-#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
- _PICK((pipe), _PALETTE_A, \
- _PALETTE_B, _CHV_PALETTE_C) + \
- (i) * 4)
+#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
+ _PICK_EVEN_2RANGES(pipe, 2, \
+ _PALETTE_A, _PALETTE_B, \
+ _CHV_PALETTE_C, _CHV_PALETTE_C) + \
+ (i) * 4)
#define PEG_BAND_GAP_DATA _MMIO(0x14d68)
@@ -1786,9 +1794,11 @@
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
-#define DARBF_GATING_DIS (1 << 27)
-#define PWM2_GATING_DIS (1 << 14)
-#define PWM1_GATING_DIS (1 << 13)
+#define DARBF_GATING_DIS REG_BIT(27)
+#define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15)
+#define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14)
+#define PWM2_GATING_DIS REG_BIT(14)
+#define PWM1_GATING_DIS REG_BIT(13)
#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
#define TGL_VRH_GATING_DIS REG_BIT(31)
@@ -1906,48 +1916,72 @@
#define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915)
#define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X)
-/* Pipe A timing regs */
-#define _HTOTAL_A 0x60000
-#define _HBLANK_A 0x60004
-#define _HSYNC_A 0x60008
-#define _VTOTAL_A 0x6000c
-#define _VBLANK_A 0x60010
-#define _VSYNC_A 0x60014
-#define _EXITLINE_A 0x60018
-#define _PIPEASRC 0x6001c
+/* Pipe/transcoder A timing regs */
+#define _TRANS_HTOTAL_A 0x60000
+#define HTOTAL_MASK REG_GENMASK(31, 16)
+#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
+#define HACTIVE_MASK REG_GENMASK(15, 0)
+#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
+#define _TRANS_HBLANK_A 0x60004
+#define HBLANK_END_MASK REG_GENMASK(31, 16)
+#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
+#define HBLANK_START_MASK REG_GENMASK(15, 0)
+#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
+#define _TRANS_HSYNC_A 0x60008
+#define HSYNC_END_MASK REG_GENMASK(31, 16)
+#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
+#define HSYNC_START_MASK REG_GENMASK(15, 0)
+#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
+#define _TRANS_VTOTAL_A 0x6000c
+#define VTOTAL_MASK REG_GENMASK(31, 16)
+#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
+#define VACTIVE_MASK REG_GENMASK(15, 0)
+#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
+#define _TRANS_VBLANK_A 0x60010
+#define VBLANK_END_MASK REG_GENMASK(31, 16)
+#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
+#define VBLANK_START_MASK REG_GENMASK(15, 0)
+#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
+#define _TRANS_VSYNC_A 0x60014
+#define VSYNC_END_MASK REG_GENMASK(31, 16)
+#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
+#define VSYNC_START_MASK REG_GENMASK(15, 0)
+#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
+#define _TRANS_EXITLINE_A 0x60018
+#define _PIPEASRC 0x6001c
#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
-#define _BCLRPAT_A 0x60020
-#define _VSYNCSHIFT_A 0x60028
-#define _PIPE_MULT_A 0x6002c
-
-/* Pipe B timing regs */
-#define _HTOTAL_B 0x61000
-#define _HBLANK_B 0x61004
-#define _HSYNC_B 0x61008
-#define _VTOTAL_B 0x6100c
-#define _VBLANK_B 0x61010
-#define _VSYNC_B 0x61014
-#define _PIPEBSRC 0x6101c
-#define _BCLRPAT_B 0x61020
-#define _VSYNCSHIFT_B 0x61028
-#define _PIPE_MULT_B 0x6102c
+#define _BCLRPAT_A 0x60020
+#define _TRANS_VSYNCSHIFT_A 0x60028
+#define _TRANS_MULT_A 0x6002c
+
+/* Pipe/transcoder B timing regs */
+#define _TRANS_HTOTAL_B 0x61000
+#define _TRANS_HBLANK_B 0x61004
+#define _TRANS_HSYNC_B 0x61008
+#define _TRANS_VTOTAL_B 0x6100c
+#define _TRANS_VBLANK_B 0x61010
+#define _TRANS_VSYNC_B 0x61014
+#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_B 0x61020
+#define _TRANS_VSYNCSHIFT_B 0x61028
+#define _TRANS_MULT_B 0x6102c
/* DSI 0 timing regs */
-#define _HTOTAL_DSI0 0x6b000
-#define _HSYNC_DSI0 0x6b008
-#define _VTOTAL_DSI0 0x6b00c
-#define _VSYNC_DSI0 0x6b014
-#define _VSYNCSHIFT_DSI0 0x6b028
+#define _TRANS_HTOTAL_DSI0 0x6b000
+#define _TRANS_HSYNC_DSI0 0x6b008
+#define _TRANS_VTOTAL_DSI0 0x6b00c
+#define _TRANS_VSYNC_DSI0 0x6b014
+#define _TRANS_VSYNCSHIFT_DSI0 0x6b028
/* DSI 1 timing regs */
-#define _HTOTAL_DSI1 0x6b800
-#define _HSYNC_DSI1 0x6b808
-#define _VTOTAL_DSI1 0x6b80c
-#define _VSYNC_DSI1 0x6b814
-#define _VSYNCSHIFT_DSI1 0x6b828
+#define _TRANS_HTOTAL_DSI1 0x6b800
+#define _TRANS_HSYNC_DSI1 0x6b808
+#define _TRANS_VTOTAL_DSI1 0x6b80c
+#define _TRANS_VSYNC_DSI1 0x6b814
+#define _TRANS_VSYNCSHIFT_DSI1 0x6b828
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
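Alongside the _TRANS_* renames, the timing registers gain proper mask/prep helpers: each register packs two 16-bit values, total/end in bits 31:16 and active/start in bits 15:0. A sketch of that packing with GENMASK/FIELD_PREP re-implemented for illustration; the minus-one programming convention shown is the usual one for these timing registers:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP32(mask, v) (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

#define HTOTAL_MASK	GENMASK32(31, 16)
#define HACTIVE_MASK	GENMASK32(15, 0)

int main(void)
{
	/* 1920 active pixels, 2200 total: hw wants the values minus one */
	uint32_t reg = FIELD_PREP32(HTOTAL_MASK, 2200 - 1) |
		       FIELD_PREP32(HACTIVE_MASK, 1920 - 1);

	printf("TRANS_HTOTAL = 0x%08x\n", reg);
	return 0;
}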
@@ -1958,18 +1992,18 @@
#define TRANSCODER_DSI0_OFFSET 0x6b000
#define TRANSCODER_DSI1_OFFSET 0x6b800
-#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A)
-#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A)
-#define HSYNC(trans) _MMIO_TRANS2(trans, _HSYNC_A)
-#define VTOTAL(trans) _MMIO_TRANS2(trans, _VTOTAL_A)
-#define VBLANK(trans) _MMIO_TRANS2(trans, _VBLANK_A)
-#define VSYNC(trans) _MMIO_TRANS2(trans, _VSYNC_A)
-#define BCLRPAT(trans) _MMIO_TRANS2(trans, _BCLRPAT_A)
-#define VSYNCSHIFT(trans) _MMIO_TRANS2(trans, _VSYNCSHIFT_A)
-#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
-#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
-
-#define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A)
+#define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A)
+#define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A)
+#define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A)
+#define TRANS_VTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_VTOTAL_A)
+#define TRANS_VBLANK(trans) _MMIO_TRANS2((trans), _TRANS_VBLANK_A)
+#define TRANS_VSYNC(trans) _MMIO_TRANS2((trans), _TRANS_VSYNC_A)
+#define BCLRPAT(trans) _MMIO_TRANS2((trans), _BCLRPAT_A)
+#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2((trans), _TRANS_VSYNCSHIFT_A)
+#define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC)
+#define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A)
+
+#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A)
#define EXITLINE_ENABLE REG_BIT(31)
#define EXITLINE_MASK REG_GENMASK(12, 0)
#define EXITLINE_SHIFT 0
@@ -2266,110 +2300,6 @@
#define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14)
#define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13)
-/* Icelake DSC Rate Control Range Parameter Registers */
-#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
-#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
-#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
-#define RC_BPG_OFFSET_SHIFT 10
-#define RC_MAX_QP_SHIFT 5
-#define RC_MIN_QP_SHIFT 0
-
-#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
-#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
-#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
-
-#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
-#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
-#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
-
-#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
-#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
-#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
-#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
-#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
-#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
-#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
- _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
-#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
- _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
-
/* VGA port control */
#define ADPA _MMIO(0x61100)
#define PCH_ADPA _MMIO(0xe1100)
@@ -2451,18 +2381,7 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
-/*
- * HDMI/DP bits are g4x+
- *
- * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
- * Please check the detailed lore in the commit message for experimental
- * evidence.
- */
-/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
-#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
-#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
-/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+/* HDMI/DP bits are g4x+ */
#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
@@ -2592,59 +2511,6 @@
#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
-/* LVDS port control */
-#define LVDS _MMIO(0x61180)
-/*
- * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
- * the DPLL semantics change when the LVDS is assigned to that pipe.
- */
-#define LVDS_PORT_EN (1 << 31)
-/* Selects pipe B for LVDS data. Must be set on pre-965. */
-#define LVDS_PIPE_SEL_SHIFT 30
-#define LVDS_PIPE_SEL_MASK (1 << 30)
-#define LVDS_PIPE_SEL(pipe) ((pipe) << 30)
-#define LVDS_PIPE_SEL_SHIFT_CPT 29
-#define LVDS_PIPE_SEL_MASK_CPT (3 << 29)
-#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29)
-/* LVDS dithering flag on 965/g4x platform */
-#define LVDS_ENABLE_DITHER (1 << 25)
-/* LVDS sync polarity flags. Set to invert (i.e. negative) */
-#define LVDS_VSYNC_POLARITY (1 << 21)
-#define LVDS_HSYNC_POLARITY (1 << 20)
-
-/* Enable border for unscaled (or aspect-scaled) display */
-#define LVDS_BORDER_ENABLE (1 << 15)
-/*
- * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
- * pixel.
- */
-#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
-#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
-#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
-/*
- * Controls the A3 data pair, which contains the additional LSBs for 24 bit
- * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
- * on.
- */
-#define LVDS_A3_POWER_MASK (3 << 6)
-#define LVDS_A3_POWER_DOWN (0 << 6)
-#define LVDS_A3_POWER_UP (3 << 6)
-/*
- * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
- * is set.
- */
-#define LVDS_CLKB_POWER_MASK (3 << 4)
-#define LVDS_CLKB_POWER_DOWN (0 << 4)
-#define LVDS_CLKB_POWER_UP (3 << 4)
-/*
- * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
- * setting for whether we are in dual-channel mode. The B3 pair will
- * additionally only be powered up when LVDS_A3_POWER_UP is set.
- */
-#define LVDS_B0B3_POWER_MASK (3 << 2)
-#define LVDS_B0B3_POWER_DOWN (0 << 2)
-#define LVDS_B0B3_POWER_UP (3 << 2)
-
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA _MMIO(0x61178)
/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
@@ -3492,61 +3358,61 @@
#define _PIPEADSL 0x70000
#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
-#define _PIPEACONF 0x70008
-#define PIPECONF_ENABLE REG_BIT(31)
-#define PIPECONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
-#define PIPECONF_STATE_ENABLE REG_BIT(30) /* i965+ */
-#define PIPECONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
-#define PIPECONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
-#define PIPECONF_FRAME_START_DELAY(x) REG_FIELD_PREP(PIPECONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
-#define PIPECONF_PIPE_LOCKED REG_BIT(25)
-#define PIPECONF_FORCE_BORDER REG_BIT(25)
-#define PIPECONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
-#define PIPECONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
-#define PIPECONF_GAMMA_MODE_8BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 0)
-#define PIPECONF_GAMMA_MODE_10BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 1)
-#define PIPECONF_GAMMA_MODE_12BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
-#define PIPECONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
-#define PIPECONF_GAMMA_MODE(x) REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
-#define PIPECONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
-#define PIPECONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 0)
-#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 4) /* gen4 only */
-#define PIPECONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 5) /* gen4 only */
-#define PIPECONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 6)
-#define PIPECONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 7) /* gen3 only */
+#define _TRANSACONF 0x70008
+#define TRANSCONF_ENABLE REG_BIT(31)
+#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
+#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
+#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */
+#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */
+#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */
+#define TRANSCONF_PIPE_LOCKED REG_BIT(25)
+#define TRANSCONF_FORCE_BORDER REG_BIT(25)
+#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */
+#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 0)
+#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 1)
+#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */
+#define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */
+#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */
+#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */
+#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0)
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */
+#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6)
+#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */
/*
* ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display,
* DBL=power saving pixel doubling, PF-ID* requires panel fitter
*/
-#define PIPECONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
-#define PIPECONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
-#define PIPECONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 0)
-#define PIPECONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 1)
-#define PIPECONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 3)
-#define PIPECONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
-#define PIPECONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
-#define PIPECONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
-#define PIPECONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
-#define PIPECONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(PIPECONF_MSA_TIMING_DELAY_MASK, (x))
-#define PIPECONF_CXSR_DOWNCLOCK REG_BIT(16)
-#define PIPECONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
-#define PIPECONF_COLOR_RANGE_SELECT REG_BIT(13)
-#define PIPECONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
-#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
-#define PIPECONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
-#define PIPECONF_BPC_8 REG_FIELD_PREP(PIPECONF_BPC_MASK, 0)
-#define PIPECONF_BPC_10 REG_FIELD_PREP(PIPECONF_BPC_MASK, 1)
-#define PIPECONF_BPC_6 REG_FIELD_PREP(PIPECONF_BPC_MASK, 2)
-#define PIPECONF_BPC_12 REG_FIELD_PREP(PIPECONF_BPC_MASK, 3)
-#define PIPECONF_DITHER_EN REG_BIT(4)
-#define PIPECONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define PIPECONF_DITHER_TYPE_SP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 0)
-#define PIPECONF_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 1)
-#define PIPECONF_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 2)
-#define PIPECONF_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 3)
+#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */
+#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */
+#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0)
+#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1)
+#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3)
+#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */
+#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */
+#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20)
+#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */
+#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x))
+#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16)
+#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14)
+#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13)
+#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */
+#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */
+#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */
+#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0)
+#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1)
+#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2)
+#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3)
+#define TRANSCONF_DITHER_EN REG_BIT(4)
+#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0)
+#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1)
+#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2)
+#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
@@ -3615,7 +3481,7 @@
#define PIPE_DSI0_OFFSET 0x7b000
#define PIPE_DSI1_OFFSET 0x7b800
-#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF)
+#define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF)
#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
#define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
@@ -3631,36 +3497,38 @@
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
-#define PIPEMISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
-#define PIPEMISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
-#define PIPEMISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
-#define PIPEMISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
-#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
+#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
+#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
+#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
+#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
+#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
/*
* For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with
* valid values of: 6, 8, 10 BPC.
* On ADLP+, bits 5-7 represent PORT OUTPUT BPC with valid values of:
* 6, 8, 10, 12 BPC.
*/
-#define PIPEMISC_BPC_MASK REG_GENMASK(7, 5)
-#define PIPEMISC_BPC_8 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 0)
-#define PIPEMISC_BPC_10 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 1)
-#define PIPEMISC_BPC_6 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 2)
-#define PIPEMISC_BPC_12_ADLP REG_FIELD_PREP(PIPEMISC_BPC_MASK, 4) /* adlp+ */
-#define PIPEMISC_DITHER_ENABLE REG_BIT(4)
-#define PIPEMISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
-#define PIPEMISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 0)
-#define PIPEMISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 1)
-#define PIPEMISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 2)
-#define PIPEMISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 3)
-#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
+#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5)
+#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0)
+#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1)
+#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2)
+#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */
+#define PIPE_MISC_DITHER_ENABLE REG_BIT(4)
+#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
+#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0)
+#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
+#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
+#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
+#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
#define _PIPE_MISC2_A 0x7002C
#define _PIPE_MISC2_B 0x7102C
#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
-#define PIPE_MISC2(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC2_A)
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
+#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
+#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
/* Skylake+ pipe bottom (background) color */
#define _SKL_BOTTOM_COLOR_A 0x70034
@@ -4255,7 +4123,7 @@
/* Pipe B */
#define _PIPEBDSL (DISPLAY_MMIO_BASE(dev_priv) + 0x71000)
-#define _PIPEBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
+#define _TRANSBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
#define _PIPEBSTAT (DISPLAY_MMIO_BASE(dev_priv) + 0x71024)
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
@@ -4526,6 +4394,7 @@
#define SP_CONST_ALPHA_ENABLE REG_BIT(31)
#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0)
#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha))
+#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac)
#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
#define SP_CONTRAST_MASK REG_GENMASK(26, 18)
#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */
@@ -4549,6 +4418,7 @@
#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
+#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac)
#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
@@ -4569,6 +4439,7 @@
#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE)
#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
@@ -4720,10 +4591,13 @@
#define _PLANE_KEYVAL_2_A 0x70294
#define _PLANE_KEYMSK_1_A 0x70198
#define _PLANE_KEYMSK_2_A 0x70298
-#define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31)
+#define PLANE_KEYMSK_ALPHA_ENABLE REG_BIT(31)
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
-#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
+#define PLANE_KEYMAX_ALPHA_MASK REG_GENMASK(31, 24)
+#define PLANE_KEYMAX_ALPHA(a) REG_FIELD_PREP(PLANE_KEYMAX_ALPHA_MASK, (a))
+#define _PLANE_SURFLIVE_1_A 0x701ac
+#define _PLANE_SURFLIVE_2_A 0x702ac
#define _PLANE_CC_VAL_1_A 0x701b4
#define _PLANE_CC_VAL_2_A 0x702b4
#define _PLANE_AUX_DIST_1_A 0x701c0
@@ -4908,6 +4782,13 @@
#define PLANE_KEYMAX(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
+#define _PLANE_SURFLIVE_1_B 0x711ac
+#define _PLANE_SURFLIVE_2_B 0x712ac
+#define _PLANE_SURFLIVE_1(pipe) _PIPE(pipe, _PLANE_SURFLIVE_1_A, _PLANE_SURFLIVE_1_B)
+#define _PLANE_SURFLIVE_2(pipe) _PIPE(pipe, _PLANE_SURFLIVE_2_A, _PLANE_SURFLIVE_2_B)
+#define PLANE_SURFLIVE(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_SURFLIVE_1(pipe), _PLANE_SURFLIVE_2(pipe))
+
#define _PLANE_BUF_CFG_1_B 0x7127c
#define _PLANE_BUF_CFG_2_B 0x7137c
/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */
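PLANE_SURFLIVE() above shows the driver's usual two-level register lookup: resolve the per-pipe copy first, then pick between the plane 1 and plane 2 instances. A sketch using the offsets from the defines above, with a simplified stand-in for _PIPE()/_MMIO_PLANE():

#include <stdint.h>
#include <stdio.h>

#define PICK2(i, a, b)	((i) == 0 ? (a) : (b))

#define SURFLIVE_1(pipe) PICK2(pipe, 0x701acu, 0x711acu)
#define SURFLIVE_2(pipe) PICK2(pipe, 0x702acu, 0x712acu)
/* Pipe selects the base copy, plane selects the instance. */
#define PLANE_SURFLIVE_SKETCH(pipe, plane) \
	PICK2(plane, SURFLIVE_1(pipe), SURFLIVE_2(pipe))

int main(void)
{
	/* pipe B (index 1), plane 2 (index 1) -> 0x712ac */
	printf("0x%x\n", PLANE_SURFLIVE_SKETCH(1, 1));
	return 0;
}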
@@ -5432,6 +5313,7 @@
#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
#define XELPD_PIPE_SOFT_UNDERRUN (1 << 22)
#define XELPD_PIPE_HARD_UNDERRUN (1 << 21)
+#define GEN12_PIPE_VBLANK_UNMOD (1 << 19)
#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
@@ -6392,9 +6274,6 @@
#define FDI_PLL_CTL_1 _MMIO(0xfe000)
#define FDI_PLL_CTL_2 _MMIO(0xfe004)
-#define PCH_LVDS _MMIO(0xe1180)
-#define LVDS_DETECTED (1 << 1)
-
#define _PCH_DP_B 0xe4100
#define PCH_DP_B _MMIO(_PCH_DP_B)
#define _PCH_DPB_AUX_CH_CTL 0xe4110
@@ -6596,15 +6475,6 @@
/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
#define PCODE_MBOX_DOMAIN_NONE 0x0
#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
-
-/* Wa_14017210380: mtl */
-#define PCODE_MBOX_GT_STATE 0x50
-/* sub-commands (param1) */
-#define PCODE_MBOX_GT_STATE_MEDIA_BUSY 0x1
-#define PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY 0x2
-/* param2 */
-#define PCODE_MBOX_GT_STATE_DOMAIN_MEDIA 0x1
-
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -7224,21 +7094,23 @@ enum skl_power_gate {
ADLS_DPCLKA_DDIK_SEL_MASK)
/* ICL PLL */
-#define DPLL0_ENABLE 0x46010
-#define DPLL1_ENABLE 0x46014
+#define _DPLL0_ENABLE 0x46010
+#define _DPLL1_ENABLE 0x46014
#define _ADLS_DPLL2_ENABLE 0x46018
#define _ADLS_DPLL3_ENABLE 0x46030
-#define PLL_ENABLE (1 << 31)
-#define PLL_LOCK (1 << 30)
-#define PLL_POWER_ENABLE (1 << 27)
-#define PLL_POWER_STATE (1 << 26)
-#define ICL_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
- _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE)
+#define PLL_ENABLE REG_BIT(31)
+#define PLL_LOCK REG_BIT(30)
+#define PLL_POWER_ENABLE REG_BIT(27)
+#define PLL_POWER_STATE REG_BIT(26)
+#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE))
#define _DG2_PLL3_ENABLE 0x4601C
-#define DG2_PLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
- _ADLS_DPLL2_ENABLE, _DG2_PLL3_ENABLE)
+#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE))
#define TBT_PLL_ENABLE _MMIO(0x46020)
@@ -7246,13 +7118,14 @@ enum skl_power_gate {
#define _MG_PLL2_ENABLE 0x46034
#define _MG_PLL3_ENABLE 0x46038
#define _MG_PLL4_ENABLE 0x4603C
-/* Bits are the same as DPLL0_ENABLE */
+/* Bits are the same as _DPLL0_ENABLE */
#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
_MG_PLL2_ENABLE)
/* DG1 PLL */
-#define DG1_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
- _MG_PLL1_ENABLE, _MG_PLL2_ENABLE)
+#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _DPLL0_ENABLE, _DPLL1_ENABLE, \
+ _MG_PLL1_ENABLE, _MG_PLL2_ENABLE))
/* ADL-P Type C PLL */
#define PORTTC1_PLL_ENABLE 0x46038
@@ -7312,9 +7185,9 @@ enum skl_power_gate {
#define _TGL_DPLL0_CFGCR0 0x164284
#define _TGL_DPLL1_CFGCR0 0x16428C
#define _TGL_TBTPLL_CFGCR0 0x16429C
-#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0, \
- _TGL_TBTPLL_CFGCR0)
+#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0))
#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
_TGL_DPLL1_CFGCR0)
@@ -7327,40 +7200,36 @@ enum skl_power_gate {
#define _TGL_DPLL0_CFGCR1 0x164288
#define _TGL_DPLL1_CFGCR1 0x164290
#define _TGL_TBTPLL_CFGCR1 0x1642A0
-#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1, \
- _TGL_TBTPLL_CFGCR1)
+#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1))
#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
_TGL_DPLL1_CFGCR1)
#define _DG1_DPLL2_CFGCR0 0x16C284
#define _DG1_DPLL3_CFGCR0 0x16C28C
-#define DG1_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0, \
- _DG1_DPLL2_CFGCR0, \
- _DG1_DPLL3_CFGCR0)
+#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0))
#define _DG1_DPLL2_CFGCR1 0x16C288
#define _DG1_DPLL3_CFGCR1 0x16C290
-#define DG1_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1, \
- _DG1_DPLL2_CFGCR1, \
- _DG1_DPLL3_CFGCR1)
+#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1))
/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
-#define _ADLS_DPLL3_CFGCR0 0x1642C0
#define _ADLS_DPLL4_CFGCR0 0x164294
-#define ADLS_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
- _TGL_DPLL1_CFGCR0, \
- _ADLS_DPLL4_CFGCR0, \
- _ADLS_DPLL3_CFGCR0)
+#define _ADLS_DPLL3_CFGCR0 0x1642C0
+#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \
+ _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0))
-#define _ADLS_DPLL3_CFGCR1 0x1642C4
#define _ADLS_DPLL4_CFGCR1 0x164298
-#define ADLS_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
- _TGL_DPLL1_CFGCR1, \
- _ADLS_DPLL4_CFGCR1, \
- _ADLS_DPLL3_CFGCR1)
+#define _ADLS_DPLL3_CFGCR1 0x1642C4
+#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \
+ _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \
+ _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1))
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
@@ -7380,6 +7249,8 @@ enum skl_power_gate {
#define DC_STATE_DISABLE 0
#define DC_STATE_EN_DC3CO REG_BIT(30)
#define DC_STATE_DC3CO_STATUS REG_BIT(29)
+#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21)
+#define HOLD_PHY_PG1_LATCH REG_BIT(20)
#define DC_STATE_EN_UPTO_DC5 (1 << 0)
#define DC_STATE_EN_DC9 (1 << 3)
#define DC_STATE_EN_UPTO_DC6 (2 << 0)
@@ -7689,47 +7560,29 @@ enum skl_power_gate {
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12)
+/* g4x+, except vlv/chv! */
#define _PIPE_FRMTMSTMP_A 0x70048
+#define _PIPE_FRMTMSTMP_B 0x71048
#define PIPE_FRMTMSTMP(pipe) \
- _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)
-
-/* Display Stream Splitter Control */
-#define DSS_CTL1 _MMIO(0x67400)
-#define SPLITTER_ENABLE (1 << 31)
-#define JOINER_ENABLE (1 << 30)
-#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
-#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
-#define OVERLAP_PIXELS_MASK (0xf << 16)
-#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
-#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
-#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
-#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
-
-#define DSS_CTL2 _MMIO(0x67404)
-#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
-#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
-#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
-#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
-
-#define _ICL_PIPE_DSS_CTL1_PB 0x78200
-#define _ICL_PIPE_DSS_CTL1_PC 0x78400
-#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_PIPE_DSS_CTL1_PB, \
- _ICL_PIPE_DSS_CTL1_PC)
-#define BIG_JOINER_ENABLE (1 << 29)
-#define MASTER_BIG_JOINER_ENABLE (1 << 28)
-#define VGA_CENTERING_ENABLE (1 << 27)
-#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
-#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
-#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
-#define UNCOMPRESSED_JOINER_MASTER (1 << 21)
-#define UNCOMPRESSED_JOINER_SLAVE (1 << 20)
-
-#define _ICL_PIPE_DSS_CTL2_PB 0x78204
-#define _ICL_PIPE_DSS_CTL2_PC 0x78404
-#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_PIPE_DSS_CTL2_PB, \
- _ICL_PIPE_DSS_CTL2_PC)
+ _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B)
+
+/* g4x+, except vlv/chv! */
+#define _PIPE_FLIPTMSTMP_A 0x7004C
+#define _PIPE_FLIPTMSTMP_B 0x7104C
+#define PIPE_FLIPTMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B)
+
+/* tgl+ */
+#define _PIPE_FLIPDONETMSTMP_A 0x70054
+#define _PIPE_FLIPDONETMSTMP_B 0x71054
+#define PIPE_FLIPDONETIMSTMP(pipe) \
+ _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
+
+#define _VLV_PIPE_MSA_MISC_A 0x70048
+#define VLV_PIPE_MSA_MISC(pipe) \
+ _MMIO_PIPE2(pipe, _VLV_PIPE_MSA_MISC_A)
+#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
+#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
#define GGC _MMIO(0x108040)
#define GMS_MASK REG_GENMASK(15, 8)
@@ -7754,314 +7607,6 @@ enum skl_power_gate {
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
#define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20)
-/* Icelake Display Stream Compression Registers */
-#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200)
-#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
-#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
-#define DSC_ALT_ICH_SEL (1 << 20)
-#define DSC_VBR_ENABLE (1 << 19)
-#define DSC_422_ENABLE (1 << 18)
-#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
-#define DSC_BLOCK_PREDICTION (1 << 16)
-#define DSC_LINE_BUF_DEPTH_SHIFT 12
-#define DSC_BPC_SHIFT 8
-#define DSC_VER_MIN_SHIFT 4
-#define DSC_VER_MAJ (0x1 << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204)
-#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
-#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
-#define DSC_BPP(bpp) ((bpp) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208)
-#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
-#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
-#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
-#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C)
-#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
-#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
-#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
-#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210)
-#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
-#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
-#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
-#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214)
-#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
-#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
-#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16)
-#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218)
-#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
-#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
-#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
-#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
-#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
-#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C)
-#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
-#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
-#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
-#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220)
-#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
-#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
-#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
-#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224)
-#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
-#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
-#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
-#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228)
-#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
-#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
-#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
-#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
-#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
-#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
-
-#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C)
-#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
-#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260)
-#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
-#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264)
-#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
-#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268)
-#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
-#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C)
-#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC
-#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
-
-#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270)
-#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70)
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
-#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
-#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0
-#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
-#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
-#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
-#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
-#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
-
-/* Icelake Rate Control Buffer Threshold Registers */
-#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
-#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
-#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30)
-#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254)
-#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354)
-#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454)
-#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554)
-#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4)
-#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_0_PB, \
- _ICL_DSC0_RC_BUF_THRESH_0_PC)
-#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
- _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
-#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_0_PB, \
- _ICL_DSC1_RC_BUF_THRESH_0_PC)
-#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
- _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
-
-#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238)
-#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4)
-#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38)
-#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C)
-#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C)
-#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4)
-#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C)
-#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4)
-#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C)
-#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4)
-#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_1_PB, \
- _ICL_DSC0_RC_BUF_THRESH_1_PC)
-#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
- _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
-#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_1_PB, \
- _ICL_DSC1_RC_BUF_THRESH_1_PC)
-#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
- _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
-
#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
#define MODULAR_FIA_MASK (1 << 4)
#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
@@ -8105,8 +7650,54 @@ enum skl_power_gate {
#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
-#define DSB_ENABLE (1 << 31)
-#define DSB_STATUS_BUSY (1 << 0)
+#define DSB_ENABLE REG_BIT(31)
+#define DSB_BUF_REITERATE REG_BIT(29)
+#define DSB_WAIT_FOR_VBLANK REG_BIT(28)
+#define DSB_WAIT_FOR_LINE_IN REG_BIT(27)
+#define DSB_HALT REG_BIT(16)
+#define DSB_NON_POSTED REG_BIT(8)
+#define DSB_STATUS_BUSY REG_BIT(0)
+#define DSB_MMIOCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xc)
+#define DSB_MMIO_DEAD_CLOCKS_ENABLE REG_BIT(31)
+#define DSB_MMIO_DEAD_CLOCKS_COUNT_MASK REG_GENMASK(15, 8)
+#define DSB_MMIO_DEAD_CLOCKS_COUNT(x) REG_FIELD_PREP(DSB_MMIO_DEAD_CLOCKS_COUNT_MASK, (x))
+#define DSB_MMIO_CYCLES_MASK REG_GENMASK(7, 0)
+#define DSB_MMIO_CYCLES(x) REG_FIELD_PREP(DSB_MMIO_CYCLES_MASK, (x))
+#define DSB_POLLFUNC(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x10)
+#define DSB_POLL_ENABLE REG_BIT(31)
+#define DSB_POLL_WAIT_MASK REG_GENMASK(30, 23)
+#define DSB_POLL_WAIT(x) REG_FIELD_PREP(DSB_POLL_WAIT_MASK, (x)) /* usec */
+#define DSB_POLL_COUNT_MASK REG_GENMASK(22, 15)
+#define DSB_POLL_COUNT(x) REG_FIELD_PREP(DSB_POLL_COUNT_MASK, (x))
+#define DSB_DEBUG(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x14)
+#define DSB_POLLMASK(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x1c)
+#define DSB_STATUS(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x24)
+#define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28)
+#define DSB_ATS_FAULT_INT_EN REG_BIT(20)
+#define DSB_GTT_FAULT_INT_EN REG_BIT(19)
+#define DSB_RSPTIMEOUT_INT_EN REG_BIT(18)
+#define DSB_POLL_ERR_INT_EN REG_BIT(17)
+#define DSB_PROG_INT_EN REG_BIT(16)
+#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4)
+#define DSB_GTT_FAULT_INT_STATUS REG_BIT(3)
+#define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2)
+#define DSB_POLL_ERR_INT_STATUS REG_BIT(1)
+#define DSB_PROG_INT_STATUS REG_BIT(0)
+#define DSB_CURRENT_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x2c)
+#define DSB_RM_TIMEOUT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x30)
+#define DSB_RM_CLAIM_TIMEOUT REG_BIT(31)
+#define DSB_RM_READY_TIMEOUT REG_BIT(30)
+#define DSB_RM_CLAIM_TIMEOUT_COUNT_MASK REG_GENMASK(23, 16)
+#define DSB_RM_CLAIM_TIMEOUT_COUNT(x) REG_FIELD_PREP(DSB_RM_CLAIM_TIMEOUT_COUNT_MASK, (x)) /* clocks */
+#define DSB_RM_READY_TIMEOUT_VALUE_MASK REG_GENMASK(15, 0)
+#define DSB_RM_READY_TIMEOUT_VALUE(x) REG_FIELD_PREP(DSB_RM_READY_TIMEOUT_VALUE_MASK, (x)) /* usec */
+#define DSB_RMTIMEOUTREG_CAPTURE(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x34)
+#define DSB_PMCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x38)
+#define DSB_PMCTRL_2(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x3c)
+#define DSB_PF_LN_LOWER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x40)
+#define DSB_PF_LN_UPPER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x44)
+#define DSB_BUFRPT_CNT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x48)
+#define DSB_CHICKEN(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xf0)
#define CLKREQ_POLICY _MMIO(0x101038)
#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
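As a quick illustration of how the new DSB field macros compose, here is a minimal user-space sketch. The REG_BIT()/REG_GENMASK()/REG_FIELD_PREP()/REG_FIELD_GET() definitions below are simplified stand-ins for the kernel helpers in i915_reg_defs.h, not the real implementations:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins for the i915_reg_defs.h helpers (assumption,
	 * not the kernel code): GENMASK-style masks plus shift-by-ctz prep. */
	#define REG_BIT(b)		(1u << (b))
	#define REG_GENMASK(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))
	#define REG_FIELD_PREP(m, v)	(((uint32_t)(v) << __builtin_ctz(m)) & (m))
	#define REG_FIELD_GET(m, r)	(((r) & (m)) >> __builtin_ctz(m))

	#define DSB_MMIO_DEAD_CLOCKS_ENABLE	REG_BIT(31)
	#define DSB_MMIO_DEAD_CLOCKS_COUNT_MASK	REG_GENMASK(15, 8)
	#define DSB_MMIO_CYCLES_MASK		REG_GENMASK(7, 0)

	int main(void)
	{
		/* Compose a DSB_MMIOCTRL value: enable dead clocks,
		 * 16 dead clocks, 8 MMIO cycles -> 0x80001008. */
		uint32_t val = DSB_MMIO_DEAD_CLOCKS_ENABLE |
			       REG_FIELD_PREP(DSB_MMIO_DEAD_CLOCKS_COUNT_MASK, 16) |
			       REG_FIELD_PREP(DSB_MMIO_CYCLES_MASK, 8);

		printf("DSB_MMIOCTRL = 0x%08x (dead clocks %u, cycles %u)\n",
		       val,
		       REG_FIELD_GET(DSB_MMIO_DEAD_CLOCKS_COUNT_MASK, val),
		       REG_FIELD_GET(DSB_MMIO_CYCLES_MASK, val));
		return 0;
	}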
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index be43580a6979..db26de6b57bc 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -120,6 +120,35 @@
#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
/*
+ * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets.
+ * @__c_index corresponds to the index at which the second range starts to be
+ * used. Using math interval notation, the first range is used for indexes [ 0,
+ * @__c_index), while the second range is used for [ @__c_index, ... ). Example:
+ *
+ * #define _FOO_A 0xf000
+ * #define _FOO_B 0xf004
+ * #define _FOO_C 0xf008
+ * #define _SUPER_FOO_A 0xa000
+ * #define _SUPER_FOO_B 0xa100
+ * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \
+ * _FOO_A, _FOO_B, \
+ * _SUPER_FOO_A, _SUPER_FOO_B))
+ *
+ * This expands to:
+ * 0: 0xf000,
+ * 1: 0xf004,
+ * 2: 0xf008,
+ * 3: 0xa000,
+ * 4: 0xa100,
+ * 5: 0xa200,
+ * ...
+ */
+#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \
+ (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \
+ ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \
+ _PICK_EVEN((__index) - (__c_index), __c, __d)))
+
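A standalone sketch that checks the expansion documented in the comment above; the kernel-only BUILD_BUG_ON_ZERO()/__is_constexpr() guard is dropped here so it compiles as plain C:

	#include <assert.h>

	#define _PICK_EVEN(i, a, b)	((a) + (i) * ((b) - (a)))
	/* Constant-expression guard from the kernel version omitted. */
	#define _PICK_EVEN_2RANGES(i, ci, a, b, c, d) \
		((i) < (ci) ? _PICK_EVEN(i, a, b) : _PICK_EVEN((i) - (ci), c, d))

	#define _FOO_A		0xf000
	#define _FOO_B		0xf004
	#define _SUPER_FOO_A	0xa000
	#define _SUPER_FOO_B	0xa100
	#define FOO(x) _PICK_EVEN_2RANGES(x, 3, _FOO_A, _FOO_B, \
					  _SUPER_FOO_A, _SUPER_FOO_B)

	int main(void)
	{
		/* Matches the expansion table in the comment. */
		assert(FOO(0) == 0xf000);
		assert(FOO(2) == 0xf008);
		assert(FOO(3) == 0xa000);
		assert(FOO(5) == 0xa200);
		return 0;
	}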
+/*
* Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
*
* Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
@@ -136,6 +165,8 @@ typedef struct {
u32 reg;
} i915_mcr_reg_t;
+#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) })
+
#define INVALID_MMIO_REG _MMIO(0)
/*
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 7503dcb9043b..630a732aaecc 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -48,7 +48,6 @@
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_trace.h"
-#include "intel_pm.h"
struct execute_cb {
struct irq_work work;
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 595e8b574990..e88bb4f04305 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -37,7 +37,6 @@
#include "i915_drv.h"
#include "i915_sysfs.h"
-#include "intel_pm.h"
struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index a72698a2dbc8..a1bc804cfa15 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -139,13 +139,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
mutex_unlock(&bman->lock);
- if (place->lpfn - place->fpfn == n_pages)
- bman_res->base.start = place->fpfn;
- else if (lpfn <= bman->visible_size)
- bman_res->base.start = 0;
- else
- bman_res->base.start = bman->visible_size;
-
*res = &bman_res->base;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 98769e5f2c3d..fc5cd14adfcc 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -119,9 +119,14 @@ void intel_device_info_print(const struct intel_device_info *info,
drm_printf(p, "display version: %u\n",
runtime->display.ip.ver);
+ drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step));
+ drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step));
+ drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step));
+ drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step));
+
drm_printf(p, "gt: %d\n", info->gt);
- drm_printf(p, "memory-regions: %x\n", runtime->memory_regions);
- drm_printf(p, "page-sizes: %x\n", runtime->page_sizes);
+ drm_printf(p, "memory-regions: 0x%x\n", runtime->memory_regions);
+ drm_printf(p, "page-sizes: 0x%x\n", runtime->page_sizes);
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size);
drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type);
@@ -202,6 +207,10 @@ static const u16 subplatform_rpl_ids[] = {
INTEL_RPLP_IDS(0),
};
+static const u16 subplatform_rplu_ids[] = {
+ INTEL_RPLU_IDS(0),
+};
+
static const u16 subplatform_g10_ids[] = {
INTEL_DG2_G10_IDS(0),
INTEL_ATS_M150_IDS(0),
@@ -269,6 +278,9 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
} else if (find_devid(devid, subplatform_rpl_ids,
ARRAY_SIZE(subplatform_rpl_ids))) {
mask = BIT(INTEL_SUBPLATFORM_RPL);
+ if (find_devid(devid, subplatform_rplu_ids,
+ ARRAY_SIZE(subplatform_rplu_ids)))
+ mask |= BIT(INTEL_SUBPLATFORM_RPLU);
} else if (find_devid(devid, subplatform_g10_ids,
ARRAY_SIZE(subplatform_g10_ids))) {
mask = BIT(INTEL_SUBPLATFORM_G10);
@@ -436,6 +448,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_sprites[pipe] = 1;
}
+ if (HAS_DISPLAY(dev_priv) &&
+ (IS_DGFX(dev_priv) || DISPLAY_VER(dev_priv) >= 14) &&
+ !(intel_de_read(dev_priv, GU_CNTL_PROTECTED) & DEPRESENT)) {
+ drm_info(&dev_priv->drm, "Display not present, disabling\n");
+
+ runtime->pipe_mask = 0;
+ }
+
if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
@@ -457,8 +477,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
drm_info(&dev_priv->drm,
"Display fused off, disabling\n");
runtime->pipe_mask = 0;
- runtime->cpu_transcoder_mask = 0;
- runtime->fbc_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
drm_info(&dev_priv->drm, "PipeC fused off\n");
runtime->pipe_mask &= ~BIT(PIPE_C);
@@ -535,5 +553,5 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
{
drm_printf(p, "Has logical contexts? %s\n",
str_yes_no(caps->has_logical_contexts));
- drm_printf(p, "scheduler: %x\n", caps->scheduler);
+ drm_printf(p, "scheduler: 0x%x\n", caps->scheduler);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 80bda653d61b..b30cc8b97c3a 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -127,6 +127,7 @@ enum intel_platform {
* bit set
*/
#define INTEL_SUBPLATFORM_N 1
+#define INTEL_SUBPLATFORM_RPLU 2
/* MTL */
#define INTEL_SUBPLATFORM_M 0
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 1f4805aa2b08..091743e32e17 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -8,6 +8,7 @@
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_lvds_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
#include "gvt/gvt.h"
@@ -117,10 +118,10 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(PIPEDSL(PIPE_B));
MMIO_D(PIPEDSL(PIPE_C));
MMIO_D(PIPEDSL(_PIPE_EDP));
- MMIO_D(PIPECONF(PIPE_A));
- MMIO_D(PIPECONF(PIPE_B));
- MMIO_D(PIPECONF(PIPE_C));
- MMIO_D(PIPECONF(_PIPE_EDP));
+ MMIO_D(TRANSCONF(TRANSCODER_A));
+ MMIO_D(TRANSCONF(TRANSCODER_B));
+ MMIO_D(TRANSCONF(TRANSCODER_C));
+ MMIO_D(TRANSCONF(TRANSCODER_EDP));
MMIO_D(PIPESTAT(PIPE_A));
MMIO_D(PIPESTAT(PIPE_B));
MMIO_D(PIPESTAT(PIPE_C));
@@ -218,41 +219,41 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(SPRSCALE(PIPE_C));
MMIO_D(SPRSURFLIVE(PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
- MMIO_D(HTOTAL(TRANSCODER_A));
- MMIO_D(HBLANK(TRANSCODER_A));
- MMIO_D(HSYNC(TRANSCODER_A));
- MMIO_D(VTOTAL(TRANSCODER_A));
- MMIO_D(VBLANK(TRANSCODER_A));
- MMIO_D(VSYNC(TRANSCODER_A));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_A));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_A));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_A));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_A));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_A));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_A));
MMIO_D(BCLRPAT(TRANSCODER_A));
- MMIO_D(VSYNCSHIFT(TRANSCODER_A));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_A));
MMIO_D(PIPESRC(TRANSCODER_A));
- MMIO_D(HTOTAL(TRANSCODER_B));
- MMIO_D(HBLANK(TRANSCODER_B));
- MMIO_D(HSYNC(TRANSCODER_B));
- MMIO_D(VTOTAL(TRANSCODER_B));
- MMIO_D(VBLANK(TRANSCODER_B));
- MMIO_D(VSYNC(TRANSCODER_B));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_B));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_B));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_B));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_B));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_B));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_B));
MMIO_D(BCLRPAT(TRANSCODER_B));
- MMIO_D(VSYNCSHIFT(TRANSCODER_B));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_B));
MMIO_D(PIPESRC(TRANSCODER_B));
- MMIO_D(HTOTAL(TRANSCODER_C));
- MMIO_D(HBLANK(TRANSCODER_C));
- MMIO_D(HSYNC(TRANSCODER_C));
- MMIO_D(VTOTAL(TRANSCODER_C));
- MMIO_D(VBLANK(TRANSCODER_C));
- MMIO_D(VSYNC(TRANSCODER_C));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_C));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_C));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_C));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_C));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_C));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_C));
MMIO_D(BCLRPAT(TRANSCODER_C));
- MMIO_D(VSYNCSHIFT(TRANSCODER_C));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_C));
MMIO_D(PIPESRC(TRANSCODER_C));
- MMIO_D(HTOTAL(TRANSCODER_EDP));
- MMIO_D(HBLANK(TRANSCODER_EDP));
- MMIO_D(HSYNC(TRANSCODER_EDP));
- MMIO_D(VTOTAL(TRANSCODER_EDP));
- MMIO_D(VBLANK(TRANSCODER_EDP));
- MMIO_D(VSYNC(TRANSCODER_EDP));
+ MMIO_D(TRANS_HTOTAL(TRANSCODER_EDP));
+ MMIO_D(TRANS_HBLANK(TRANSCODER_EDP));
+ MMIO_D(TRANS_HSYNC(TRANSCODER_EDP));
+ MMIO_D(TRANS_VTOTAL(TRANSCODER_EDP));
+ MMIO_D(TRANS_VBLANK(TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNC(TRANSCODER_EDP));
MMIO_D(BCLRPAT(TRANSCODER_EDP));
- MMIO_D(VSYNCSHIFT(TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_EDP));
MMIO_D(PIPE_DATA_M1(TRANSCODER_A));
MMIO_D(PIPE_DATA_N1(TRANSCODER_A));
MMIO_D(PIPE_DATA_M2(TRANSCODER_A));
@@ -493,9 +494,9 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GAMMA_MODE(PIPE_A));
MMIO_D(GAMMA_MODE(PIPE_B));
MMIO_D(GAMMA_MODE(PIPE_C));
- MMIO_D(PIPE_MULT(PIPE_A));
- MMIO_D(PIPE_MULT(PIPE_B));
- MMIO_D(PIPE_MULT(PIPE_C));
+ MMIO_D(TRANS_MULT(TRANSCODER_A));
+ MMIO_D(TRANS_MULT(TRANSCODER_B));
+ MMIO_D(TRANS_MULT(TRANSCODER_C));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B));
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C));
@@ -788,9 +789,9 @@ static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_RING_D(RING_REG);
#undef RING_REG
- MMIO_D(PIPEMISC(PIPE_A));
- MMIO_D(PIPEMISC(PIPE_B));
- MMIO_D(PIPEMISC(PIPE_C));
+ MMIO_D(PIPE_MISC(PIPE_A));
+ MMIO_D(PIPE_MISC(PIPE_B));
+ MMIO_D(PIPE_MISC(PIPE_C));
MMIO_D(_MMIO(0x1c1d0));
MMIO_D(GEN6_MBCUNIT_SNPCR);
MMIO_D(GEN7_MISCCPCTL);
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index a234d9b4ed14..3db2ba439bb5 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -204,15 +204,42 @@ out:
#undef COND
}
+static int pcode_init_wait(struct intel_uncore *uncore, int timeout_ms)
+{
+ if (__intel_wait_for_register_fw(uncore,
+ GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY, 0,
+ 500, timeout_ms,
+ NULL))
+ return -EPROBE_DEFER;
+
+ return skl_pcode_request(uncore,
+ DG1_PCODE_STATUS,
+ DG1_UNCORE_GET_INIT_STATUS,
+ DG1_UNCORE_INIT_STATUS_COMPLETE,
+ DG1_UNCORE_INIT_STATUS_COMPLETE, timeout_ms);
+}
+
int intel_pcode_init(struct intel_uncore *uncore)
{
+ int err;
+
if (!IS_DGFX(uncore->i915))
return 0;
- return skl_pcode_request(uncore, DG1_PCODE_STATUS,
- DG1_UNCORE_GET_INIT_STATUS,
- DG1_UNCORE_INIT_STATUS_COMPLETE,
- DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+ /*
+ * Wait 10 seconds to give the punit time to settle and complete
+ * any outstanding transactions upon module load.
+ */
+ err = pcode_init_wait(uncore, 10000);
+
+ if (err) {
+ drm_notice(&uncore->i915->drm,
+ "Waiting for HW initialisation...\n");
+ err = pcode_init_wait(uncore, 180000);
+ }
+
+ return err;
}
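The shape of this change is a generic short-timeout-then-fallback pattern. A minimal user-space sketch, with wait_ready() as a hypothetical stand-in for pcode_init_wait():

	#include <stdio.h>

	/* Hypothetical stand-in for pcode_init_wait(): here we pretend the
	 * hardware only becomes ready once the long timeout is used. */
	static int wait_ready(int timeout_ms)
	{
		return timeout_ms >= 180 * 1000 ? 0 : -1;
	}

	static int init_with_fallback(void)
	{
		int err = wait_ready(10 * 1000);	/* quick attempt: 10 s */

		if (err) {
			fprintf(stderr, "Waiting for HW initialisation...\n");
			err = wait_ready(180 * 1000);	/* slow path: 180 s */
		}

		return err;
	}

	int main(void)
	{
		return init_with_fallback();
	}

The quick attempt keeps the common case (punit already settled) fast, while the long retry preserves the original 180 s budget for slow hardware bring-up.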
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 59714b1080d4..c45af0d981fd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -44,13 +44,6 @@ struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
};
-/* used in computing the new watermarks state */
-struct intel_wm_config {
- unsigned int num_pipes_active;
- bool sprites_enabled;
- bool sprites_scaled;
-};
-
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -131,3961 +124,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
PWM1_GATING_DIS | PWM2_GATING_DIS);
}
-static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
-
- switch (tmp & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_533:
- dev_priv->fsb_freq = 533; /* 133*4 */
- break;
- case CLKCFG_FSB_800:
- dev_priv->fsb_freq = 800; /* 200*4 */
- break;
- case CLKCFG_FSB_667:
- dev_priv->fsb_freq = 667; /* 167*4 */
- break;
- case CLKCFG_FSB_400:
- dev_priv->fsb_freq = 400; /* 100*4 */
- break;
- }
-
- switch (tmp & CLKCFG_MEM_MASK) {
- case CLKCFG_MEM_533:
- dev_priv->mem_freq = 533;
- break;
- case CLKCFG_MEM_667:
- dev_priv->mem_freq = 667;
- break;
- case CLKCFG_MEM_800:
- dev_priv->mem_freq = 800;
- break;
- }
-
- /* detect pineview DDR3 setting */
- tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
- dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-}
-
-static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
-{
- u16 ddrpll, csipll;
-
- ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
- csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
-
- switch (ddrpll & 0xff) {
- case 0xc:
- dev_priv->mem_freq = 800;
- break;
- case 0x10:
- dev_priv->mem_freq = 1066;
- break;
- case 0x14:
- dev_priv->mem_freq = 1333;
- break;
- case 0x18:
- dev_priv->mem_freq = 1600;
- break;
- default:
- drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
- dev_priv->mem_freq = 0;
- break;
- }
-
- switch (csipll & 0x3ff) {
- case 0x00c:
- dev_priv->fsb_freq = 3200;
- break;
- case 0x00e:
- dev_priv->fsb_freq = 3733;
- break;
- case 0x010:
- dev_priv->fsb_freq = 4266;
- break;
- case 0x012:
- dev_priv->fsb_freq = 4800;
- break;
- case 0x014:
- dev_priv->fsb_freq = 5333;
- break;
- case 0x016:
- dev_priv->fsb_freq = 5866;
- break;
- case 0x018:
- dev_priv->fsb_freq = 6400;
- break;
- default:
- drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
- dev_priv->fsb_freq = 0;
- break;
- }
-}
-
-static const struct cxsr_latency cxsr_latency_table[] = {
- {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
- {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
- {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
-
- {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
- {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
- {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
-
- {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
- {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
- {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
-
- {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
- {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
- {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
-
- {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
- {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
- {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
-
- {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
- {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
- {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
-};
-
-static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
- bool is_ddr3,
- int fsb,
- int mem)
-{
- const struct cxsr_latency *latency;
- int i;
-
- if (fsb == 0 || mem == 0)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
- latency = &cxsr_latency_table[i];
- if (is_desktop == latency->is_desktop &&
- is_ddr3 == latency->is_ddr3 &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- return latency;
- }
-
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-
- return NULL;
-}
-
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
-{
- u32 val;
-
- vlv_punit_get(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
- if (enable)
- val &= ~FORCE_DDR_HIGH_FREQ;
- else
- val |= FORCE_DDR_HIGH_FREQ;
- val &= ~FORCE_DDR_LOW_FREQ;
- val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
-
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- drm_err(&dev_priv->drm,
- "timed out waiting for Punit DDR DVFS request\n");
-
- vlv_punit_put(dev_priv);
-}
-
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
-{
- u32 val;
-
- vlv_punit_get(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- if (enable)
- val |= DSP_MAXFIFO_PM5_ENABLE;
- else
- val &= ~DSP_MAXFIFO_PM5_ENABLE;
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
-
- vlv_punit_put(dev_priv);
-}
-
-#define FW_WM(value, plane) \
- (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
-{
- bool was_enabled;
- u32 val;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
- } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
- if (enable)
- val |= PINEVIEW_SELF_REFRESH_EN;
- else
- val &= ~PINEVIEW_SELF_REFRESH_EN;
- intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
- _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_I915GM(dev_priv)) {
- /*
- * FIXME can't find a bit like this for 915G, and
- * and yet it does have the related watermark in
- * FW_BLC_SELF. What's going on?
- */
- was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
- _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, INSTPM, val);
- intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
- } else {
- return false;
- }
-
- trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
-
- drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
- str_enabled_disabled(enable),
- str_enabled_disabled(was_enabled));
-
- return was_enabled;
-}
-
-/**
- * intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
- * @enable: Allow vs. disallow CxSR
- *
- * Allow or disallow the system to enter a special CxSR
- * (C-state self refresh) state. What typically happens in CxSR mode
- * is that several display FIFOs may get combined into a single larger
- * FIFO for a particular plane (so called max FIFO mode) to allow the
- * system to defer memory fetches longer, and the memory will enter
- * self refresh.
- *
- * Note that enabling CxSR does not guarantee that the system enter
- * this special mode, nor does it guarantee that the system stays
- * in that mode once entered. So this just allows/disallows the system
- * to autonomously utilize the CxSR mode. Other factors such as core
- * C-states will affect when/if the system actually enters/exits the
- * CxSR mode.
- *
- * Note that on VLV/CHV this actually only controls the max FIFO mode,
- * and the system is free to enter/exit memory self refresh at any time
- * even when the use of CxSR has been disallowed.
- *
- * While the system is actually in the CxSR/max FIFO mode, some plane
- * control registers will not get latched on vblank. Thus in order to
- * guarantee the system will respond to changes in the plane registers
- * we must always disallow CxSR prior to making changes to those registers.
- * Unfortunately the system will re-evaluate the CxSR conditions at
- * frame start which happens after vblank start (which is when the plane
- * registers would get latched), so we can't proceed with the plane update
- * during the same frame where we disallowed CxSR.
- *
- * Certain platforms also have a deeper HPLL SR mode. Fortunately the
- * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
- * the hardware w.r.t. HPLL SR when writing to plane registers.
- * Disallowing just CxSR is sufficient.
- */
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
-{
- bool ret;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- ret = _intel_set_memory_cxsr(dev_priv, enable);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.wm.vlv.cxsr = enable;
- else if (IS_G4X(dev_priv))
- dev_priv->display.wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-
- return ret;
-}
-
-/*
- * Latency for FIFO fetches is dependent on several factors:
- * - memory configuration (speed, channels)
- * - chipset
- * - current MCH state
- * It can be fairly high in some situations, so here we assume a fairly
- * pessimal value. It's a tradeoff between extra memory fetches (if we
- * set this value too high, the FIFO will fetch frequently to stay full)
- * and power consumption (set it too low to save power and we might see
- * FIFO underruns and display "flicker").
- *
- * A value of 5us seems to be a good balance; safe for very low end
- * platforms but not overly aggressive on lower latency configs.
- */
-static const int pessimal_latency_ns = 5000;
-
-#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
- ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-
-static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
- enum pipe pipe = crtc->pipe;
- int sprite0_start, sprite1_start;
- u32 dsparb, dsparb2, dsparb3;
-
- switch (pipe) {
- case PIPE_A:
- dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
- sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
- break;
- case PIPE_B:
- dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
- sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
- break;
- case PIPE_C:
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
- sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
- sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
- break;
- default:
- MISSING_CASE(pipe);
- return;
- }
-
- fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
- fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
- fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
- fifo_state->plane[PLANE_CURSOR] = 63;
-}
-
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- if (i9xx_plane == PLANE_B)
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
-
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
-
- return size;
-}
-
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- int size;
-
- size = dsparb & 0x1ff;
- if (i9xx_plane == PLANE_B)
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
- size >>= 1; /* Convert to cachelines */
-
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
-
- return size;
-}
-
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane)
-{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
-
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
- dsparb, plane_name(i9xx_plane), size);
-
- return size;
-}
-
-/* Pineview has different values for various configs */
-static const struct intel_watermark_params pnv_display_wm = {
- .fifo_size = PINEVIEW_DISPLAY_FIFO,
- .max_wm = PINEVIEW_MAX_WM,
- .default_wm = PINEVIEW_DFT_WM,
- .guard_size = PINEVIEW_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_display_hplloff_wm = {
- .fifo_size = PINEVIEW_DISPLAY_FIFO,
- .max_wm = PINEVIEW_MAX_WM,
- .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
- .guard_size = PINEVIEW_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_cursor_wm = {
- .fifo_size = PINEVIEW_CURSOR_FIFO,
- .max_wm = PINEVIEW_CURSOR_MAX_WM,
- .default_wm = PINEVIEW_CURSOR_DFT_WM,
- .guard_size = PINEVIEW_CURSOR_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
- .fifo_size = PINEVIEW_CURSOR_FIFO,
- .max_wm = PINEVIEW_CURSOR_MAX_WM,
- .default_wm = PINEVIEW_CURSOR_DFT_WM,
- .guard_size = PINEVIEW_CURSOR_GUARD_WM,
- .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i965_cursor_wm_info = {
- .fifo_size = I965_CURSOR_FIFO,
- .max_wm = I965_CURSOR_MAX_WM,
- .default_wm = I965_CURSOR_DFT_WM,
- .guard_size = 2,
- .cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i945_wm_info = {
- .fifo_size = I945_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i915_wm_info = {
- .fifo_size = I915_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i830_a_wm_info = {
- .fifo_size = I855GM_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i830_bc_wm_info = {
- .fifo_size = I855GM_FIFO_SIZE,
- .max_wm = I915_MAX_WM/2,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i845_wm_info = {
- .fifo_size = I830_FIFO_SIZE,
- .max_wm = I915_MAX_WM,
- .default_wm = 1,
- .guard_size = 2,
- .cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-/**
- * intel_wm_method1 - Method 1 / "small buffer" watermark formula
- * @pixel_rate: Pipe pixel rate in kHz
- * @cpp: Plane bytes per pixel
- * @latency: Memory wakeup latency in 0.1us units
- *
- * Compute the watermark using the method 1 or "small buffer"
- * formula. The caller may additonally add extra cachelines
- * to account for TLB misses and clock crossings.
- *
- * This method is concerned with the short term drain rate
- * of the FIFO, ie. it does not account for blanking periods
- * which would effectively reduce the average drain rate across
- * a longer period. The name "small" refers to the fact the
- * FIFO is relatively small compared to the amount of data
- * fetched.
- *
- * The FIFO level vs. time graph might look something like:
- *
- * |\ |\
- * | \ | \
- * __---__---__ (- plane active, _ blanking)
- * -> time
- *
- * or perhaps like this:
- *
- * |\|\ |\|\
- * __----__----__ (- plane active, _ blanking)
- * -> time
- *
- * Returns:
- * The watermark in bytes
- */
-static unsigned int intel_wm_method1(unsigned int pixel_rate,
- unsigned int cpp,
- unsigned int latency)
-{
- u64 ret;
-
- ret = mul_u32_u32(pixel_rate, cpp * latency);
- ret = DIV_ROUND_UP_ULL(ret, 10000);
-
- return ret;
-}
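For reference, a worked instance of the method-1 formula removed above (input values invented for illustration): with a 148500 kHz pixel rate, 4 bytes per pixel and a 5 usec wakeup latency (50 in 0.1 usec units), the watermark is 148500 * 4 * 50 / 10000 = 2970 bytes, i.e. 47 cachelines of 64 bytes.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors intel_wm_method1(): bytes drained during the wakeup latency. */
	static unsigned int wm_method1(unsigned int pixel_rate, unsigned int cpp,
				       unsigned int latency)
	{
		uint64_t ret = (uint64_t)pixel_rate * cpp * latency;

		return (unsigned int)((ret + 10000 - 1) / 10000); /* DIV_ROUND_UP */
	}

	int main(void)
	{
		/* Prints 2970. */
		printf("%u\n", wm_method1(148500, 4, 50));
		return 0;
	}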
-
-/**
- * intel_wm_method2 - Method 2 / "large buffer" watermark formula
- * @pixel_rate: Pipe pixel rate in kHz
- * @htotal: Pipe horizontal total
- * @width: Plane width in pixels
- * @cpp: Plane bytes per pixel
- * @latency: Memory wakeup latency in 0.1us units
- *
- * Compute the watermark using the method 2 or "large buffer"
- * formula. The caller may additonally add extra cachelines
- * to account for TLB misses and clock crossings.
- *
- * This method is concerned with the long term drain rate
- * of the FIFO, ie. it does account for blanking periods
- * which effectively reduce the average drain rate across
- * a longer period. The name "large" refers to the fact the
- * FIFO is relatively large compared to the amount of data
- * fetched.
- *
- * The FIFO level vs. time graph might look something like:
- *
- * |\___ |\___
- * | \___ | \___
- * | \ | \
- * __ --__--__--__--__--__--__ (- plane active, _ blanking)
- * -> time
- *
- * Returns:
- * The watermark in bytes
- */
-static unsigned int intel_wm_method2(unsigned int pixel_rate,
- unsigned int htotal,
- unsigned int width,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- /*
- * FIXME remove once all users are computing
- * watermarks in the correct place.
- */
- if (WARN_ON_ONCE(htotal == 0))
- htotal = 1;
-
- ret = (latency * pixel_rate) / (htotal * 10000);
- ret = (ret + 1) * width * cpp;
-
- return ret;
-}
-
-/**
- * intel_calculate_wm - calculate watermark level
- * @pixel_rate: pixel clock
- * @wm: chip FIFO params
- * @fifo_size: size of the FIFO buffer
- * @cpp: bytes per pixel
- * @latency_ns: memory latency for the platform
- *
- * Calculate the watermark level (the level at which the display plane will
- * start fetching from memory again). Each chip has a different display
- * FIFO size and allocation, so the caller needs to figure that out and pass
- * in the correct intel_watermark_params structure.
- *
- * As the pixel clock runs, the FIFO will be drained at a rate that depends
- * on the pixel size. When it reaches the watermark level, it'll start
- * fetching FIFO line sized based chunks from memory until the FIFO fills
- * past the watermark point. If the FIFO drains completely, a FIFO underrun
- * will occur, and a display engine hang could result.
- */
-static unsigned int intel_calculate_wm(int pixel_rate,
- const struct intel_watermark_params *wm,
- int fifo_size, int cpp,
- unsigned int latency_ns)
-{
- int entries, wm_size;
-
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
- */
- entries = intel_wm_method1(pixel_rate, cpp,
- latency_ns / 100);
- entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
- wm->guard_size;
- DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
-
- wm_size = fifo_size - entries;
- DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
-
- /* Don't promote wm_size to unsigned... */
- if (wm_size > wm->max_wm)
- wm_size = wm->max_wm;
- if (wm_size <= 0)
- wm_size = wm->default_wm;
-
- /*
- * Bspec seems to indicate that the value shouldn't be lower than
- * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
- * Lets go for 8 which is the burst size since certain platforms
- * already use a hardcoded 8 (which is what the spec says should be
- * done).
- */
- if (wm_size <= 8)
- wm_size = 8;
-
- return wm_size;
-}
-
-static bool is_disabling(int old, int new, int threshold)
-{
- return old >= threshold && new < threshold;
-}
-
-static bool is_enabling(int old, int new, int threshold)
-{
- return old < threshold && new >= threshold;
-}
-
-static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
-{
- return dev_priv->display.wm.max_level + 1;
-}
-
-bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-
- /* FIXME check the 'enable' instead */
- if (!crtc_state->hw.active)
- return false;
-
- /*
- * Treat cursor with fb as always visible since cursor updates
- * can happen faster than the vrefresh rate, and the current
- * watermark code doesn't handle that correctly. Cursor updates
- * which set/clear the fb or change the cursor size are going
- * to get throttled by intel_legacy_cursor_update() to work
- * around this problem with the watermark code.
- */
- if (plane->id == PLANE_CURSOR)
- return plane_state->hw.fb != NULL;
- else
- return plane_state->uapi.visible;
-}
-
-static bool intel_crtc_active(struct intel_crtc *crtc)
-{
- /* Be paranoid as we can arrive here with only partial
- * state retrieved from the hardware during setup.
- *
- * We can ditch the adjusted_mode.crtc_clock check as soon
- * as Haswell has gained clock readout/fastboot support.
- *
- * We can ditch the crtc->primary->state->fb check as soon as we can
- * properly reconstruct framebuffers.
- *
- * FIXME: The intel_crtc->active here should be switched to
- * crtc->state->active once we have proper CRTC states wired up
- * for atomic.
- */
- return crtc && crtc->active && crtc->base.primary->state->fb &&
- crtc->config->hw.adjusted_mode.crtc_clock;
-}
-
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc, *enabled = NULL;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- if (intel_crtc_active(crtc)) {
- if (enabled)
- return NULL;
- enabled = crtc;
- }
- }
-
- return enabled;
-}
-
-static void pnv_update_wm(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
- const struct cxsr_latency *latency;
- u32 reg;
- unsigned int wm;
-
- latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq);
- if (!latency) {
- drm_dbg_kms(&dev_priv->drm,
- "Unknown FSB/MEM found, disable CxSR\n");
- intel_set_memory_cxsr(dev_priv, false);
- return;
- }
-
- crtc = single_enabled_crtc(dev_priv);
- if (crtc) {
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int pixel_rate = crtc->config->pixel_rate;
- int cpp = fb->format->cpp[0];
-
- /* Display SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
- pnv_display_wm.fifo_size,
- cpp, latency->display_sr);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
- reg &= ~DSPFW_SR_MASK;
- reg |= FW_WM(wm, SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
- pnv_display_wm.fifo_size,
- 4, latency->cursor_sr);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
- FW_WM(wm, CURSOR_SR));
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
- pnv_display_hplloff_wm.fifo_size,
- cpp, latency->display_hpll_disable);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
- pnv_display_hplloff_wm.fifo_size,
- 4, latency->cursor_hpll_disable);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= FW_WM(wm, HPLL_CURSOR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
-
- intel_set_memory_cxsr(dev_priv, true);
- } else {
- intel_set_memory_cxsr(dev_priv, false);
- }
-}
-
-/*
- * Documentation says:
- * "If the line size is small, the TLB fetches can get in the way of the
- * data fetches, causing some lag in the pixel data return which is not
- * accounted for in the above formulas. The following adjustment only
- * needs to be applied if eight whole lines fit in the buffer at once.
- * The WM is adjusted upwards by the difference between the FIFO size
- * and the size of 8 whole lines. This adjustment is always performed
- * in the actual pixel depth regardless of whether FBC is enabled or not."
- */
-static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
-{
- int tlb_miss = fifo_size * 64 - width * cpp * 8;
-
- return max(0, tlb_miss);
-}
-
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
- const struct g4x_wm_values *wm)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe)
- trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1,
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2,
- (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
- FW_WM(wm->sr.fbc, FBC_SR) |
- FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3,
- (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
- FW_WM(wm->sr.cursor, CURSOR_SR) |
- FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
- FW_WM(wm->hpll.plane, HPLL_SR));
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
-}
-
-#define FW_WM_VLV(value, plane) \
- (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
- const struct vlv_wm_values *wm)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
-
- intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
- (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
- }
-
- /*
- * Zero the (unused) WM1 watermarks, and also clear all the
- * high order bits so that there are no out of bounds values
- * present in the registers during the reprogramming.
- */
- intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
- intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1,
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2,
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3,
- FW_WM(wm->sr.cursor, CURSOR_SR));
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
- } else {
- intel_uncore_write(&dev_priv->uncore, DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
- }
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
-}
-
-#undef FW_WM_VLV
-
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- /* all latencies in usec */
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
-
- dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL;
-}
-
-static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
-{
- /*
- * DSPCNTR[13] supposedly controls whether the
- * primary plane can use the FIFO space otherwise
- * reserved for the sprite plane. It's not 100% clear
- * what the actual FIFO size is, but it looks like we
- * can happily set both primary and sprite watermarks
- * up to 127 cachelines. So that would seem to mean
- * that either DSPCNTR[13] doesn't do anything, or that
- * the total FIFO is >= 256 cachelines in size. Either
- * way, we don't seem to have to worry about this
- * repartitioning as the maximum watermark value the
- * register can hold for each plane is lower than the
- * minimum FIFO size.
- */
- switch (plane_id) {
- case PLANE_CURSOR:
- return 63;
- case PLANE_PRIMARY:
- return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
- case PLANE_SPRITE0:
- return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
- default:
- MISSING_CASE(plane_id);
- return 0;
- }
-}
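
A consequence of these per-level sizes, relied on by the validity checks further down: the sprite FIFO is 0 at the SR and HPLL levels, so those levels only survive when the sprite's computed watermark is 0, i.e. when the sprite is invisible. A minimal illustration:

	/* At the SR level a visible sprite can never fit: */
	g4x_plane_fifo_size(PLANE_SPRITE0, G4X_WM_LEVEL_SR);	/* == 0 */
	g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR);	/* == 511 */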
-
-static int g4x_fbc_fifo_size(int level)
-{
- switch (level) {
- case G4X_WM_LEVEL_SR:
- return 7;
- case G4X_WM_LEVEL_HPLL:
- return 15;
- default:
- MISSING_CASE(level);
- return 0;
- }
-}
-
-static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
- unsigned int pixel_rate, htotal, cpp, width, wm;
-
- if (latency == 0)
- return USHRT_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- /*
- * WaUse32BppForSRWM:ctg,elk
- *
- * The spec fails to list this restriction for the
- * HPLL watermark, which seems a little strange.
- * Let's use 32bpp for the HPLL watermark as well.
- */
- if (plane->id == PLANE_PRIMARY &&
- level != G4X_WM_LEVEL_NORMAL)
- cpp = max(cpp, 4u);
-
- pixel_rate = crtc_state->pixel_rate;
- htotal = pipe_mode->crtc_htotal;
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- if (plane->id == PLANE_CURSOR) {
- wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
- } else if (plane->id == PLANE_PRIMARY &&
- level == G4X_WM_LEVEL_NORMAL) {
- wm = intel_wm_method1(pixel_rate, cpp, latency);
- } else {
- unsigned int small, large;
-
- small = intel_wm_method1(pixel_rate, cpp, latency);
- large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
-
- wm = min(small, large);
- }
-
- wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
- width, cpp);
-
- wm = DIV_ROUND_UP(wm, 64) + 2;
-
- return min_t(unsigned int, wm, USHRT_MAX);
-}
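
A worked example may help here; the arithmetic for intel_wm_method1/2 is sketched from the usual bytes-versus-scanlines formulation and is an assumption, not quoted from those helpers. All numbers are hypothetical:

	unsigned int pixel_rate = 148500;	/* kHz */
	unsigned int htotal = 2200, width = 1920, cpp = 4;
	unsigned int latency = 120;		/* 12 usec in 0.1us units */

	/* method1: bytes streamed out during the latency window */
	unsigned int m1 = DIV_ROUND_UP(pixel_rate * cpp * latency, 10000);
	/* = 7128 bytes */

	/* method2: whole scanlines touched during the latency window */
	unsigned int m2 = (latency * pixel_rate / (htotal * 10000) + 1) *
		width * cpp;			/* 1 line = 7680 bytes */

	/* g4x takes the smaller, converts to cachelines, adds a guard: */
	unsigned int wm = DIV_ROUND_UP(min(m1, m2), 64) + 2;	/* = 114 */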
-
-static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
- int level, enum plane_id plane_id, u16 value)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- bool dirty = false;
-
- for (; level < intel_wm_num_levels(dev_priv); level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
- dirty |= raw->plane[plane_id] != value;
- raw->plane[plane_id] = value;
- }
-
- return dirty;
-}
-
-static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
- int level, u16 value)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- bool dirty = false;
-
- /* NORMAL level doesn't have an FBC watermark */
- level = max(level, G4X_WM_LEVEL_SR);
-
- for (; level < intel_wm_num_levels(dev_priv); level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
- dirty |= raw->fbc != value;
- raw->fbc = value;
- }
-
- return dirty;
-}
-
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 pri_val);
-
-static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
- enum plane_id plane_id = plane->id;
- bool dirty = false;
- int level;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state)) {
- dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
- if (plane_id == PLANE_PRIMARY)
- dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
- goto out;
- }
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
- int wm, max_wm;
-
- wm = g4x_compute_wm(crtc_state, plane_state, level);
- max_wm = g4x_plane_fifo_size(plane_id, level);
-
- if (wm > max_wm)
- break;
-
- dirty |= raw->plane[plane_id] != wm;
- raw->plane[plane_id] = wm;
-
- if (plane_id != PLANE_PRIMARY ||
- level == G4X_WM_LEVEL_NORMAL)
- continue;
-
- wm = ilk_compute_fbc_wm(crtc_state, plane_state,
- raw->plane[plane_id]);
- max_wm = g4x_fbc_fifo_size(level);
-
- /*
- * FBC wm is not mandatory as we
- * can always just disable its use.
- */
- if (wm > max_wm)
- wm = USHRT_MAX;
-
- dirty |= raw->fbc != wm;
- raw->fbc = wm;
- }
-
- /* mark all higher levels as invalid */

- dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
-
- if (plane_id == PLANE_PRIMARY)
- dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
-
- out:
- if (dirty) {
- drm_dbg_kms(&dev_priv->drm,
- "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
- plane->base.name,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
-
- if (plane_id == PLANE_PRIMARY)
- drm_dbg_kms(&dev_priv->drm,
- "FBC watermarks: SR=%d, HPLL=%d\n",
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
- crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
- }
-
- return dirty;
-}
-
-static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
- enum plane_id plane_id, int level)
-{
- const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
- return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
-}
-
-static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
- int level)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- if (level > dev_priv->display.wm.max_level)
- return false;
-
- return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
- g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
- g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
-}
-
-/* mark all levels starting from 'level' as invalid */
-static void g4x_invalidate_wms(struct intel_crtc *crtc,
- struct g4x_wm_state *wm_state, int level)
-{
- if (level <= G4X_WM_LEVEL_NORMAL) {
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- wm_state->wm.plane[plane_id] = USHRT_MAX;
- }
-
- if (level <= G4X_WM_LEVEL_SR) {
- wm_state->cxsr = false;
- wm_state->sr.cursor = USHRT_MAX;
- wm_state->sr.plane = USHRT_MAX;
- wm_state->sr.fbc = USHRT_MAX;
- }
-
- if (level <= G4X_WM_LEVEL_HPLL) {
- wm_state->hpll_en = false;
- wm_state->hpll.cursor = USHRT_MAX;
- wm_state->hpll.plane = USHRT_MAX;
- wm_state->hpll.fbc = USHRT_MAX;
- }
-}
-
-static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
- int level)
-{
- if (level < G4X_WM_LEVEL_SR)
- return false;
-
- if (level >= G4X_WM_LEVEL_SR &&
- wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
- return false;
-
- if (level >= G4X_WM_LEVEL_HPLL &&
- wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
- return false;
-
- return true;
-}
-
-static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
- u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- const struct g4x_pipe_wm *raw;
- enum plane_id plane_id;
- int level;
-
- level = G4X_WM_LEVEL_NORMAL;
- if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- for_each_plane_id_on_crtc(crtc, plane_id)
- wm_state->wm.plane[plane_id] = raw->plane[plane_id];
-
- level = G4X_WM_LEVEL_SR;
- if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
- wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
- wm_state->sr.fbc = raw->fbc;
-
- wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
-
- level = G4X_WM_LEVEL_HPLL;
- if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
- wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
- wm_state->hpll.fbc = raw->fbc;
-
- wm_state->hpll_en = wm_state->cxsr;
-
- level++;
-
- out:
- if (level == G4X_WM_LEVEL_NORMAL)
- return -EINVAL;
-
- /* invalidate the higher levels */
- g4x_invalidate_wms(crtc, wm_state, level);
-
- /*
- * Determine if the FBC watermark(s) can be used. If
- * this isn't the case we prefer to disable the FBC
- * watermark(s) rather than disable the SR/HPLL
- * level(s) entirely. 'level-1' is the highest valid
- * level here.
- */
- wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
-
- return 0;
-}
-
-static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *old_plane_state;
- const struct intel_plane_state *new_plane_state;
- struct intel_plane *plane;
- unsigned int dirty = 0;
- int i;
-
- for_each_oldnew_intel_plane_in_state(state, plane,
- old_plane_state,
- new_plane_state, i) {
- if (new_plane_state->hw.crtc != &crtc->base &&
- old_plane_state->hw.crtc != &crtc->base)
- continue;
-
- if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
- dirty |= BIT(plane->id);
- }
-
- if (!dirty)
- return 0;
-
- return _g4x_compute_pipe_wm(crtc_state);
-}
-
-static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
- const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
- const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
- enum plane_id plane_id;
-
- if (!new_crtc_state->hw.active ||
- intel_crtc_needs_modeset(new_crtc_state)) {
- *intermediate = *optimal;
-
- intermediate->cxsr = false;
- intermediate->hpll_en = false;
- goto out;
- }
-
- intermediate->cxsr = optimal->cxsr && active->cxsr &&
- !new_crtc_state->disable_cxsr;
- intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
- !new_crtc_state->disable_cxsr;
- intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- intermediate->wm.plane[plane_id] =
- max(optimal->wm.plane[plane_id],
- active->wm.plane[plane_id]);
-
- drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
- g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
- }
-
- intermediate->sr.plane = max(optimal->sr.plane,
- active->sr.plane);
- intermediate->sr.cursor = max(optimal->sr.cursor,
- active->sr.cursor);
- intermediate->sr.fbc = max(optimal->sr.fbc,
- active->sr.fbc);
-
- intermediate->hpll.plane = max(optimal->hpll.plane,
- active->hpll.plane);
- intermediate->hpll.cursor = max(optimal->hpll.cursor,
- active->hpll.cursor);
- intermediate->hpll.fbc = max(optimal->hpll.fbc,
- active->hpll.fbc);
-
- drm_WARN_ON(&dev_priv->drm,
- (intermediate->sr.plane >
- g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
- intermediate->sr.cursor >
- g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
- intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
- (intermediate->sr.plane >
- g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
- intermediate->sr.cursor >
- g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
- intermediate->hpll_en);
-
- drm_WARN_ON(&dev_priv->drm,
- intermediate->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR) &&
- intermediate->fbc_en && intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
- intermediate->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL) &&
- intermediate->fbc_en && intermediate->hpll_en);
-
-out:
- /*
- * If our intermediate WMs are identical to the final WMs, then we can
- * omit the post-vblank programming; only update if it's different.
- */
- if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
- new_crtc_state->wm.need_postvbl_update = true;
-
- return 0;
-}
-
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
- struct g4x_wm_values *wm)
-{
- struct intel_crtc *crtc;
- int num_active_pipes = 0;
-
- wm->cxsr = true;
- wm->hpll_en = true;
- wm->fbc_en = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
-
- if (!crtc->active)
- continue;
-
- if (!wm_state->cxsr)
- wm->cxsr = false;
- if (!wm_state->hpll_en)
- wm->hpll_en = false;
- if (!wm_state->fbc_en)
- wm->fbc_en = false;
-
- num_active_pipes++;
- }
-
- if (num_active_pipes != 1) {
- wm->cxsr = false;
- wm->hpll_en = false;
- wm->fbc_en = false;
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
- enum pipe pipe = crtc->pipe;
-
- wm->pipe[pipe] = wm_state->wm;
- if (crtc->active && wm->cxsr)
- wm->sr = wm_state->sr;
- if (crtc->active && wm->hpll_en)
- wm->hpll = wm_state->hpll;
- }
-}
-
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
-{
- struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
- struct g4x_wm_values new_wm = {};
-
- g4x_merge_wm(dev_priv, &new_wm);
-
- if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
- return;
-
- if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
-
- g4x_write_wm_values(dev_priv, &new_wm);
-
- if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
-
- *old_wm = new_wm;
-}
-
-static void g4x_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void g4x_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state->wm.need_postvbl_update)
- return;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int vlv_wm_method2(unsigned int pixel_rate,
- unsigned int htotal,
- unsigned int width,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- ret = intel_wm_method2(pixel_rate, htotal,
- width, cpp, latency);
- ret = DIV_ROUND_UP(ret, 64);
-
- return ret;
-}
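
Note the contrast with the g4x variant above: no '+2' guard is added here and the raw cacheline count is used as-is; any headroom comes out of the FIFO repartitioning below. Continuing the earlier hypothetical numbers:

	DIV_ROUND_UP(7128, 64);		/* = 112 cachelines, no extra guard */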
-
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- /* all latencies in usec */
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
-
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
-
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
- }
-}
-
-static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- unsigned int pixel_rate, htotal, cpp, width, wm;
-
- if (dev_priv->display.wm.pri_latency[level] == 0)
- return USHRT_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
- pixel_rate = crtc_state->pixel_rate;
- htotal = pipe_mode->crtc_htotal;
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- if (plane->id == PLANE_CURSOR) {
- /*
- * FIXME the formula gives values that are
- * too big for the cursor FIFO, and hence we
- * would never be able to use cursors. For
- * now just hardcode the watermark.
- */
- wm = 63;
- } else {
- wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->display.wm.pri_latency[level] * 10);
- }
-
- return min_t(unsigned int, wm, USHRT_MAX);
-}
-
-static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
-{
- return (active_planes & (BIT(PLANE_SPRITE0) |
- BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
-}
-
-static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
- struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
- u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight8(active_planes);
- const int fifo_size = 511;
- int fifo_extra, fifo_left = fifo_size;
- int sprite0_fifo_extra = 0;
- unsigned int total_rate;
- enum plane_id plane_id;
-
- /*
- * When enabling sprite0 after sprite1 has already been enabled
- * we tend to get an underrun unless sprite0 already has some
- * FIFO space allocated. Hence we always allocate at least one
- * cacheline for sprite0 whenever sprite1 is enabled.
- *
- * All other plane enable sequences appear immune to this problem.
- */
- if (vlv_need_sprite0_fifo_workaround(active_planes))
- sprite0_fifo_extra = 1;
-
- total_rate = raw->plane[PLANE_PRIMARY] +
- raw->plane[PLANE_SPRITE0] +
- raw->plane[PLANE_SPRITE1] +
- sprite0_fifo_extra;
-
- if (total_rate > fifo_size)
- return -EINVAL;
-
- if (total_rate == 0)
- total_rate = 1;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- unsigned int rate;
-
- if ((active_planes & BIT(plane_id)) == 0) {
- fifo_state->plane[plane_id] = 0;
- continue;
- }
-
- rate = raw->plane[plane_id];
- fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
- fifo_left -= fifo_state->plane[plane_id];
- }
-
- fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
- fifo_left -= sprite0_fifo_extra;
-
- fifo_state->plane[PLANE_CURSOR] = 63;
-
- fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
-
- /* spread the remainder evenly */
- for_each_plane_id_on_crtc(crtc, plane_id) {
- int plane_extra;
-
- if (fifo_left == 0)
- break;
-
- if ((active_planes & BIT(plane_id)) == 0)
- continue;
-
- plane_extra = min(fifo_extra, fifo_left);
- fifo_state->plane[plane_id] += plane_extra;
- fifo_left -= plane_extra;
- }
-
- drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
-
- /* give it all to the first plane if none are active */
- if (active_planes == 0) {
- drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
- fifo_state->plane[PLANE_PRIMARY] = fifo_left;
- }
-
- return 0;
-}
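
A worked split with made-up PM2 watermarks shows how the proportional allocation plus remainder spreading always accounts for all 511 entries, which is exactly what the drm_WARN_ON() above asserts:

	const int fifo_size = 511;
	unsigned int pri_rate = 60, spr0_rate = 30;	/* hypothetical */
	unsigned int total = pri_rate + spr0_rate;	/* 90 */
	int pri  = fifo_size * pri_rate  / total;	/* 340 */
	int spr0 = fifo_size * spr0_rate / total;	/* 170 */
	int left = fifo_size - pri - spr0;		/* 1 */
	pri += min(DIV_ROUND_UP(left, 2), left);	/* 341; 341+170 == 511 */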
-
-/* mark all levels starting from 'level' as invalid */
-static void vlv_invalidate_wms(struct intel_crtc *crtc,
- struct vlv_wm_state *wm_state, int level)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- for (; level < intel_wm_num_levels(dev_priv); level++) {
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- wm_state->wm[level].plane[plane_id] = USHRT_MAX;
-
- wm_state->sr[level].cursor = USHRT_MAX;
- wm_state->sr[level].plane = USHRT_MAX;
- }
-}
-
-static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
-{
- if (wm > fifo_size)
- return USHRT_MAX;
- else
- return fifo_size - wm;
-}
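
The inversion reflects how the registers appear to be specified: they hold the unused portion of the plane's FIFO rather than the watermark itself (that reading is inferred from this helper, not from documentation). For example:

	vlv_invert_wm_value(112, 341);	/* -> 229 */
	vlv_invert_wm_value(400, 341);	/* doesn't fit -> USHRT_MAX */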
-
-/*
- * Starting from 'level' set all higher
- * levels to 'value' in the "raw" watermarks.
- */
-static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
- int level, enum plane_id plane_id, u16 value)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int num_levels = intel_wm_num_levels(dev_priv);
- bool dirty = false;
-
- for (; level < num_levels; level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
-
- dirty |= raw->plane[plane_id] != value;
- raw->plane[plane_id] = value;
- }
-
- return dirty;
-}
-
-static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- enum plane_id plane_id = plane->id;
- int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
- int level;
- bool dirty = false;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state)) {
- dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
- goto out;
- }
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
- int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
-
- if (wm > max_wm)
- break;
-
- dirty |= raw->plane[plane_id] != wm;
- raw->plane[plane_id] = wm;
- }
-
- /* mark all higher levels as invalid */
- dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
-
-out:
- if (dirty)
- drm_dbg_kms(&dev_priv->drm,
- "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
- plane->base.name,
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
- crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
-
- return dirty;
-}
-
-static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
- enum plane_id plane_id, int level)
-{
- const struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[level];
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
-
- return raw->plane[plane_id] <= fifo_state->plane[plane_id];
-}
-
-static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
-{
- return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
- vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
- vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
- vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
-}
-
-static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight8(active_planes);
- enum plane_id plane_id;
- int level;
-
- /* initially allow all levels */
- wm_state->num_levels = intel_wm_num_levels(dev_priv);
- /*
- * Note that enabling cxsr with no primary/sprite planes
- * enabled can wedge the pipe. Hence we only allow cxsr
- * with exactly one enabled primary/sprite plane.
- */
- wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
-
- for (level = 0; level < wm_state->num_levels; level++) {
- const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
-
- if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
- break;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- wm_state->wm[level].plane[plane_id] =
- vlv_invert_wm_value(raw->plane[plane_id],
- fifo_state->plane[plane_id]);
- }
-
- wm_state->sr[level].plane =
- vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
- raw->plane[PLANE_SPRITE0],
- raw->plane[PLANE_SPRITE1]),
- sr_fifo_size);
-
- wm_state->sr[level].cursor =
- vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
- 63);
- }
-
- if (level == 0)
- return -EINVAL;
-
- /* limit to only levels we can actually handle */
- wm_state->num_levels = level;
-
- /* invalidate the higher levels */
- vlv_invalidate_wms(crtc, wm_state, level);
-
- return 0;
-}
-
-static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *old_plane_state;
- const struct intel_plane_state *new_plane_state;
- struct intel_plane *plane;
- unsigned int dirty = 0;
- int i;
-
- for_each_oldnew_intel_plane_in_state(state, plane,
- old_plane_state,
- new_plane_state, i) {
- if (new_plane_state->hw.crtc != &crtc->base &&
- old_plane_state->hw.crtc != &crtc->base)
- continue;
-
- if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
- dirty |= BIT(plane->id);
- }
-
- /*
- * DSPARB registers may have been reset due to the
- * power well being turned off. Make sure we restore
- * them to a consistent state even if no primary/sprite
- * planes are initially active. We also force a FIFO
- * recomputation so that we are sure to sanitize the
- * FIFO setting we took over from the BIOS even if there
- * are no active planes on the crtc.
- */
- if (intel_crtc_needs_modeset(crtc_state))
- dirty = ~0;
-
- if (!dirty)
- return 0;
-
- /* cursor changes don't warrant a FIFO recompute */
- if (dirty & ~BIT(PLANE_CURSOR)) {
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct vlv_fifo_state *old_fifo_state =
- &old_crtc_state->wm.vlv.fifo_state;
- const struct vlv_fifo_state *new_fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- int ret;
-
- ret = vlv_compute_fifo(crtc_state);
- if (ret)
- return ret;
-
- if (intel_crtc_needs_modeset(crtc_state) ||
- memcmp(old_fifo_state, new_fifo_state,
- sizeof(*new_fifo_state)) != 0)
- crtc_state->fifo_changed = true;
- }
-
- return _vlv_compute_pipe_wm(crtc_state);
-}
-
-#define VLV_FIFO(plane, value) \
- (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
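
The token pasting keeps the per-plane register plumbing below terse; expanded by hand, one use reads:

	/* VLV_FIFO(SPRITEB, sprite1_start) becomes: */
	((sprite1_start << DSPARB_SPRITEB_SHIFT_VLV) &
	 DSPARB_SPRITEB_MASK_VLV)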
-
-static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_uncore *uncore = &dev_priv->uncore;
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- int sprite0_start, sprite1_start, fifo_size;
- u32 dsparb, dsparb2, dsparb3;
-
- if (!crtc_state->fifo_changed)
- return;
-
- sprite0_start = fifo_state->plane[PLANE_PRIMARY];
- sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
- fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
-
- drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
- drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
-
- trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
-
- /*
- * uncore.lock serves a double purpose here. It allows us to
- * use the less expensive intel_uncore_*_fw() accessors, and
- * it protects the DSPARB registers from getting clobbered by
- * parallel updates from multiple pipes.
- *
- * intel_pipe_update_start() has already disabled interrupts
- * for us, so a plain spin_lock() is sufficient here.
- */
- spin_lock(&uncore->lock);
-
- switch (crtc->pipe) {
- case PIPE_A:
- dsparb = intel_uncore_read_fw(uncore, DSPARB);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
- dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
- VLV_FIFO(SPRITEB, 0xff));
- dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
- VLV_FIFO(SPRITEB, sprite1_start));
-
- dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
- VLV_FIFO(SPRITEB_HI, 0x1));
- dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
- VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
-
- intel_uncore_write_fw(uncore, DSPARB, dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
- break;
- case PIPE_B:
- dsparb = intel_uncore_read_fw(uncore, DSPARB);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
- dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
- VLV_FIFO(SPRITED, 0xff));
- dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
- VLV_FIFO(SPRITED, sprite1_start));
-
- dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
- VLV_FIFO(SPRITED_HI, 0xff));
- dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
- VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
-
- intel_uncore_write_fw(uncore, DSPARB, dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
- break;
- case PIPE_C:
- dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
- dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
- VLV_FIFO(SPRITEF, 0xff));
- dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
- VLV_FIFO(SPRITEF, sprite1_start));
-
- dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
- VLV_FIFO(SPRITEF_HI, 0xff));
- dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
- VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
-
- intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
- break;
- default:
- break;
- }
-
- intel_uncore_posting_read_fw(uncore, DSPARB);
-
- spin_unlock(&uncore->lock);
-}
-
-#undef VLV_FIFO
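
Pictorially, the two partition points written above carve up one pipe's FIFO as follows (the cursor has its own fixed 63-entry allocation and is not part of DSPARB):

	/*
	 *  0        sprite0_start      sprite1_start        511
	 *  | primary  |     sprite0      |      sprite1      |
	 *
	 * Each 9-bit boundary is split across DSPARB (or DSPARB3
	 * for pipe C) for the low 8 bits and DSPARB2 for the _HI bit.
	 */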
-
-static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
- const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
- const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
- int level;
-
- if (!new_crtc_state->hw.active ||
- intel_crtc_needs_modeset(new_crtc_state)) {
- *intermediate = *optimal;
-
- intermediate->cxsr = false;
- goto out;
- }
-
- intermediate->num_levels = min(optimal->num_levels, active->num_levels);
- intermediate->cxsr = optimal->cxsr && active->cxsr &&
- !new_crtc_state->disable_cxsr;
-
- for (level = 0; level < intermediate->num_levels; level++) {
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- intermediate->wm[level].plane[plane_id] =
- min(optimal->wm[level].plane[plane_id],
- active->wm[level].plane[plane_id]);
- }
-
- intermediate->sr[level].plane = min(optimal->sr[level].plane,
- active->sr[level].plane);
- intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
- active->sr[level].cursor);
- }
-
- vlv_invalidate_wms(crtc, intermediate, level);
-
-out:
- /*
- * If our intermediate WMs are identical to the final WMs, then we can
- * omit the post-vblank programming; only update if it's different.
- */
- if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
- new_crtc_state->wm.need_postvbl_update = true;
-
- return 0;
-}
-
-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
- struct vlv_wm_values *wm)
-{
- struct intel_crtc *crtc;
- int num_active_pipes = 0;
-
- wm->level = dev_priv->display.wm.max_level;
- wm->cxsr = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
-
- if (!crtc->active)
- continue;
-
- if (!wm_state->cxsr)
- wm->cxsr = false;
-
- num_active_pipes++;
- wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
- }
-
- if (num_active_pipes != 1)
- wm->cxsr = false;
-
- if (num_active_pipes > 1)
- wm->level = VLV_WM_LEVEL_PM2;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
- enum pipe pipe = crtc->pipe;
-
- wm->pipe[pipe] = wm_state->wm[wm->level];
- if (crtc->active && wm->cxsr)
- wm->sr = wm_state->sr[wm->level];
-
- wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
- }
-}
-
-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
-{
- struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
- struct vlv_wm_values new_wm = {};
-
- vlv_merge_wm(dev_priv, &new_wm);
-
- if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
- return;
-
- if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, false);
-
- if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, false);
-
- if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
-
- vlv_write_wm_values(dev_priv, &new_wm);
-
- if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
-
- if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, true);
-
- if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, true);
-
- *old_wm = new_wm;
-}
-
-static void vlv_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void vlv_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state->wm.need_postvbl_update)
- return;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void i965_update_wm(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
- int srwm = 1;
- int cursor_sr = 16;
- bool cxsr_enabled;
-
- /* Calc SR entries for single-plane configs */
- crtc = single_enabled_crtc(dev_priv);
- if (crtc) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
- const struct drm_display_mode *pipe_mode =
- &crtc->config->hw.pipe_mode;
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int pixel_rate = crtc->config->pixel_rate;
- int htotal = pipe_mode->crtc_htotal;
- int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
- int cpp = fb->format->cpp[0];
- int entries;
-
- entries = intel_wm_method2(pixel_rate, htotal,
- width, cpp, sr_latency_ns / 100);
- entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
- srwm = I965_FIFO_SIZE - entries;
- if (srwm < 0)
- srwm = 1;
- srwm &= 0x1ff;
- drm_dbg_kms(&dev_priv->drm,
- "self-refresh entries: %d, wm: %d\n",
- entries, srwm);
-
- entries = intel_wm_method2(pixel_rate, htotal,
- crtc->base.cursor->state->crtc_w, 4,
- sr_latency_ns / 100);
- entries = DIV_ROUND_UP(entries,
- i965_cursor_wm_info.cacheline_size) +
- i965_cursor_wm_info.guard_size;
-
- cursor_sr = i965_cursor_wm_info.fifo_size - entries;
- if (cursor_sr > i965_cursor_wm_info.max_wm)
- cursor_sr = i965_cursor_wm_info.max_wm;
-
- drm_dbg_kms(&dev_priv->drm,
- "self-refresh watermark: display plane %d "
- "cursor %d\n", srwm, cursor_sr);
-
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- /* Turn off self-refresh if both pipes are enabled */
- intel_set_memory_cxsr(dev_priv, false);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
-
- /* 965 has limitations... */
- intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
- FW_WM(8, CURSORB) |
- FW_WM(8, PLANEB) |
- FW_WM(8, PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
- FW_WM(8, PLANEC_OLD));
- /* update cursor SR watermark */
- intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
-
- if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
-}
-
-#undef FW_WM
-
-static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
- enum i9xx_plane_id i9xx_plane)
-{
- struct intel_plane *plane;
-
- for_each_intel_plane(&i915->drm, plane) {
- if (plane->id == PLANE_PRIMARY &&
- plane->i9xx_plane == i9xx_plane)
- return intel_crtc_for_pipe(i915, plane->pipe);
- }
-
- return NULL;
-}
-
-static void i9xx_update_wm(struct drm_i915_private *dev_priv)
-{
- const struct intel_watermark_params *wm_info;
- u32 fwater_lo;
- u32 fwater_hi;
- int cwm, srwm = 1;
- int fifo_size;
- int planea_wm, planeb_wm;
- struct intel_crtc *crtc;
-
- if (IS_I945GM(dev_priv))
- wm_info = &i945_wm_info;
- else if (DISPLAY_VER(dev_priv) != 2)
- wm_info = &i915_wm_info;
- else
- wm_info = &i830_a_wm_info;
-
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
- else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
- if (intel_crtc_active(crtc)) {
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int cpp;
-
- if (DISPLAY_VER(dev_priv) == 2)
- cpp = 4;
- else
- cpp = fb->format->cpp[0];
-
- planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
- wm_info, fifo_size, cpp,
- pessimal_latency_ns);
- } else {
- planea_wm = fifo_size - wm_info->guard_size;
- if (planea_wm > (long)wm_info->max_wm)
- planea_wm = wm_info->max_wm;
- }
-
- if (DISPLAY_VER(dev_priv) == 2)
- wm_info = &i830_bc_wm_info;
-
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
- else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
- if (intel_crtc_active(crtc)) {
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int cpp;
-
- if (DISPLAY_VER(dev_priv) == 2)
- cpp = 4;
- else
- cpp = fb->format->cpp[0];
-
- planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
- wm_info, fifo_size, cpp,
- pessimal_latency_ns);
- } else {
- planeb_wm = fifo_size - wm_info->guard_size;
- if (planeb_wm > (long)wm_info->max_wm)
- planeb_wm = wm_info->max_wm;
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-
- crtc = single_enabled_crtc(dev_priv);
- if (IS_I915GM(dev_priv) && crtc) {
- struct drm_i915_gem_object *obj;
-
- obj = intel_fb_obj(crtc->base.primary->state->fb);
-
- /* self-refresh seems busted with untiled */
- if (!i915_gem_object_is_tiled(obj))
- crtc = NULL;
- }
-
- /*
- * Overlay gets an aggressive default since video jitter is bad.
- */
- cwm = 2;
-
- /* Play safe and disable self-refresh before adjusting watermarks. */
- intel_set_memory_cxsr(dev_priv, false);
-
- /* Calc SR entries for single-plane configs */
- if (HAS_FW_BLC(dev_priv) && crtc) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 6000;
- const struct drm_display_mode *pipe_mode =
- &crtc->config->hw.pipe_mode;
- const struct drm_framebuffer *fb =
- crtc->base.primary->state->fb;
- int pixel_rate = crtc->config->pixel_rate;
- int htotal = pipe_mode->crtc_htotal;
- int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
- int cpp;
- int entries;
-
- if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
- cpp = 4;
- else
- cpp = fb->format->cpp[0];
-
- entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
- sr_latency_ns / 100);
- entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- drm_dbg_kms(&dev_priv->drm,
- "self-refresh entries: %d\n", entries);
- srwm = wm_info->fifo_size - entries;
- if (srwm < 0)
- srwm = 1;
-
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
-
- fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
- fwater_hi = (cwm & 0x1f);
-
- /* Set request length to 8 cachelines per fetch */
- fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
- fwater_hi = fwater_hi | (1 << 8);
-
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
- intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
-
- if (crtc)
- intel_set_memory_cxsr(dev_priv, true);
-}
-
-static void i845_update_wm(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
- u32 fwater_lo;
- int planea_wm;
-
- crtc = single_enabled_crtc(dev_priv);
- if (crtc == NULL)
- return;
-
- planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
- &i845_wm_info,
- i845_get_fifo_size(dev_priv, PLANE_A),
- 4, pessimal_latency_ns);
- fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
- fwater_lo |= (3<<8) | planea_wm;
-
- drm_dbg_kms(&dev_priv->drm,
- "Setting FIFO watermarks - A: %d\n", planea_wm);
-
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int ilk_wm_method1(unsigned int pixel_rate,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- ret = intel_wm_method1(pixel_rate, cpp, latency);
- ret = DIV_ROUND_UP(ret, 64) + 2;
-
- return ret;
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int ilk_wm_method2(unsigned int pixel_rate,
- unsigned int htotal,
- unsigned int width,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- ret = intel_wm_method2(pixel_rate, htotal,
- width, cpp, latency);
- ret = DIV_ROUND_UP(ret, 64) + 2;
-
- return ret;
-}
-
-static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
-{
- /*
- * Neither of these should be possible since this function shouldn't be
- * called if the CRTC is off or the plane is invisible. But let's be
- * extra paranoid to avoid a potential divide-by-zero if we screw up
- * elsewhere in the driver.
- */
- if (WARN_ON(!cpp))
- return 0;
- if (WARN_ON(!horiz_pixels))
- return 0;
-
- return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
-}
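
Put differently, the FBC watermark is the primary watermark converted from cachelines back into whole scanlines, plus a two-line guard. With the numbers from the earlier hypothetical g4x example:

	/* pri_val = 114 cachelines, 1920 pixels/line at 4 bpp: */
	DIV_ROUND_UP(114 * 64, 1920 * 4) + 2;	/* = 1 + 2 = 3 lines */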
-
-struct ilk_wm_maximums {
- u16 pri;
- u16 spr;
- u16 cur;
- u16 fbc;
-};
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 mem_value, bool is_lp)
-{
- u32 method1, method2;
- int cpp;
-
- if (mem_value == 0)
- return U32_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
-
- if (!is_lp)
- return method1;
-
- method2 = ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp, mem_value);
-
- return min(method1, method2);
-}
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 mem_value)
-{
- u32 method1, method2;
- int cpp;
-
- if (mem_value == 0)
- return U32_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
- method2 = ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp, mem_value);
- return min(method1, method2);
-}
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 mem_value)
-{
- int cpp;
-
- if (mem_value == 0)
- return U32_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- return ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp, mem_value);
-}
-
-/* Only for WM_LP. */
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- u32 pri_val)
-{
- int cpp;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
-
- return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
- cpp);
-}
-
-static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 8)
- return 3072;
- else if (DISPLAY_VER(dev_priv) >= 7)
- return 768;
- else
- return 512;
-}
-
-static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
- int level, bool is_sprite)
-{
- if (DISPLAY_VER(dev_priv) >= 8)
- /* BDW primary/sprite plane watermarks */
- return level == 0 ? 255 : 2047;
- else if (DISPLAY_VER(dev_priv) >= 7)
- /* IVB/HSW primary/sprite plane watermarks */
- return level == 0 ? 127 : 1023;
- else if (!is_sprite)
- /* ILK/SNB primary plane watermarks */
- return level == 0 ? 127 : 511;
- else
- /* ILK/SNB sprite plane watermarks */
- return level == 0 ? 63 : 255;
-}
-
-static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
-{
- if (DISPLAY_VER(dev_priv) >= 7)
- return level == 0 ? 63 : 255;
- else
- return level == 0 ? 31 : 63;
-}
-
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 8)
- return 31;
- else
- return 15;
-}
-
-/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
- int level,
- const struct intel_wm_config *config,
- enum intel_ddb_partitioning ddb_partitioning,
- bool is_sprite)
-{
- unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
-
- /* if sprites aren't enabled, sprites get nothing */
- if (is_sprite && !config->sprites_enabled)
- return 0;
-
- /* HSW allows LP1+ watermarks even with multiple pipes */
- if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_NUM_PIPES(dev_priv);
-
- /*
- * For some reason the non self refresh
- * FIFO size is only half of the self
- * refresh FIFO size on ILK/SNB.
- */
- if (DISPLAY_VER(dev_priv) <= 6)
- fifo_size /= 2;
- }
-
- if (config->sprites_enabled) {
- /* level 0 is always calculated with 1:1 split */
- if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
- if (is_sprite)
- fifo_size *= 5;
- fifo_size /= 6;
- } else {
- fifo_size /= 2;
- }
- }
-
- /* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
-}
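
A hypothetical ILK/SNB case makes the partitioning concrete: one active pipe, sprites enabled, an LP (level > 0) watermark with 5/6 DDB partitioning:

	/*
	 *   fifo_size = 512          (not divided: single pipe, LP level)
	 *   primary:  512 / 6     =  85
	 *   sprite:   512 * 5 / 6 = 426 -> clamped to the 255 reg max
	 */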
-
-/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
- int level,
- const struct intel_wm_config *config)
-{
- /* HSW LP1+ watermarks w/ multiple pipes */
- if (level > 0 && config->num_pipes_active > 1)
- return 64;
-
- /* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev_priv, level);
-}
-
-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
- int level,
- const struct intel_wm_config *config,
- enum intel_ddb_partitioning ddb_partitioning,
- struct ilk_wm_maximums *max)
-{
- max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev_priv, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
-}
-
-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
- int level,
- struct ilk_wm_maximums *max)
-{
- max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
- max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
-}
-
-static bool ilk_validate_wm_level(int level,
- const struct ilk_wm_maximums *max,
- struct intel_wm_level *result)
-{
- bool ret;
-
- /* already determined to be invalid? */
- if (!result->enable)
- return false;
-
- result->enable = result->pri_val <= max->pri &&
- result->spr_val <= max->spr &&
- result->cur_val <= max->cur;
-
- ret = result->enable;
-
- /*
- * HACK until we can pre-compute everything,
- * and thus fail gracefully if LP0 watermarks
- * are exceeded...
- */
- if (level == 0 && !result->enable) {
- if (result->pri_val > max->pri)
- DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
- level, result->pri_val, max->pri);
- if (result->spr_val > max->spr)
- DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
- level, result->spr_val, max->spr);
- if (result->cur_val > max->cur)
- DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
- level, result->cur_val, max->cur);
-
- result->pri_val = min_t(u32, result->pri_val, max->pri);
- result->spr_val = min_t(u32, result->spr_val, max->spr);
- result->cur_val = min_t(u32, result->cur_val, max->cur);
- result->enable = true;
- }
-
- return ret;
-}
-
-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
- const struct intel_crtc *crtc,
- int level,
- struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *pristate,
- const struct intel_plane_state *sprstate,
- const struct intel_plane_state *curstate,
- struct intel_wm_level *result)
-{
- u16 pri_latency = dev_priv->display.wm.pri_latency[level];
- u16 spr_latency = dev_priv->display.wm.spr_latency[level];
- u16 cur_latency = dev_priv->display.wm.cur_latency[level];
-
- /* WM1+ latency values stored in 0.5us units */
- if (level > 0) {
- pri_latency *= 5;
- spr_latency *= 5;
- cur_latency *= 5;
- }
-
- if (pristate) {
- result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
- pri_latency, level);
- result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
- }
-
- if (sprstate)
- result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
-
- if (curstate)
- result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
-
- result->enable = true;
-}
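
The *5 scaling above lines everything up into the 0.1us units the ilk_wm_method*() helpers expect, e.g.:

	u16 spr_latency = 4;	/* raw WM1+ value, 0.5us units */
	spr_latency *= 5;	/* 20 x 0.1us == 2.0 usec */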
-
-static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
- u64 sskpd;
-
- sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
- if (wm[0] == 0)
- wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
- wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
- wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
- wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
- wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
-}
-
-static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
- u32 sskpd;
-
- sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
- wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
- wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
- wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
-}
-
-static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
- u32 mltr;
-
- mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
-
- /* ILK primary LP0 latency is 700 ns */
- wm[0] = 7;
- wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
- wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
-}
-
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
-{
- /* ILK sprite LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
- wm[0] = 13;
-}
-
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
-{
- /* ILK cursor LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
- wm[0] = 13;
-}
-
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
-{
- /* how many WM levels are we expecting */
- if (HAS_HW_SAGV_WM(dev_priv))
- return 5;
- else if (DISPLAY_VER(dev_priv) >= 9)
- return 7;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- return 4;
- else if (DISPLAY_VER(dev_priv) >= 6)
- return 3;
- else
- return 2;
-}
-
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name, const u16 wm[])
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = wm[level];
-
- if (latency == 0) {
- drm_dbg_kms(&dev_priv->drm,
- "%s WM%d latency not provided\n",
- name, level);
- continue;
- }
-
- /*
- * - latencies are in us on gen9+.
- * - before then, WM1+ latency values are in 0.5us units.
- */
- if (DISPLAY_VER(dev_priv) >= 9)
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s WM%d latency %u (%u.%u usec)\n", name, level,
- wm[level], latency / 10, latency % 10);
- }
-}
-
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5], u16 min)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- if (wm[0] >= min)
- return false;
-
- wm[0] = max(wm[0], min);
- for (level = 1; level <= max_level; level++)
- wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
-
- return true;
-}
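
With the floor of 12 that the SNB quirk below passes in, the adjustment works out to (wm[0] is in 0.1us units, wm[1]+ in 0.5us units, hence the division by 5):

	wm[0] = max(wm[0], 12);				/* >= 1.2 usec */
	wm[level] = max_t(u16, wm[level],
			  DIV_ROUND_UP(12, 5));		/* >= 1.5 usec */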
-
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
-{
- bool changed;
-
- /*
- * The BIOS-provided WM memory latency values are often
- * inadequate for high-resolution displays. Adjust them.
- */
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
-
- if (!changed)
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-}
-
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
-{
- /*
- * On some SNB machines (Thinkpad X220 Tablet at least)
- * LP3 usage can cause vblank interrupts to be lost.
- * The DEIIR bit will go high but it looks like the CPU
- * never gets interrupted.
- *
- * It's not clear whether other interrupt sources could
- * be affected or if this is somehow limited to vblank
- * interrupts only. To play it safe we disable LP3
- * watermarks entirely.
- */
- if (dev_priv->display.wm.pri_latency[3] == 0 &&
- dev_priv->display.wm.spr_latency[3] == 0 &&
- dev_priv->display.wm.cur_latency[3] == 0)
- return;
-
- dev_priv->display.wm.pri_latency[3] = 0;
- dev_priv->display.wm.spr_latency[3] = 0;
- dev_priv->display.wm.cur_latency[3] = 0;
-
- drm_dbg_kms(&dev_priv->drm,
- "LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-}
-
-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else if (DISPLAY_VER(dev_priv) >= 6)
- snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else
- ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
-
- memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
- memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
-
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
-
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-
- if (DISPLAY_VER(dev_priv) == 6) {
- snb_wm_latency_quirk(dev_priv);
- snb_wm_lp3_irq_quirk(dev_priv);
- }
-}
-
-static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
- struct intel_pipe_wm *pipe_wm)
-{
- /* LP0 watermark maximums depend on this pipe alone */
- const struct intel_wm_config config = {
- .num_pipes_active = 1,
- .sprites_enabled = pipe_wm->sprites_enabled,
- .sprites_scaled = pipe_wm->sprites_scaled,
- };
- struct ilk_wm_maximums max;
-
- /* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
-
- /* At least LP0 must be valid */
- if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
- drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
- return false;
- }
-
- return true;
-}
-
-/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_pipe_wm *pipe_wm;
- struct intel_plane *plane;
- const struct intel_plane_state *plane_state;
- const struct intel_plane_state *pristate = NULL;
- const struct intel_plane_state *sprstate = NULL;
- const struct intel_plane_state *curstate = NULL;
- int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
- struct ilk_wm_maximums max;
-
- pipe_wm = &crtc_state->wm.ilk.optimal;
-
- intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
- if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
- pristate = plane_state;
- else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
- sprstate = plane_state;
- else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
- curstate = plane_state;
- }
-
- pipe_wm->pipe_enabled = crtc_state->hw.active;
- pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
- pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
-
- usable_level = max_level;
-
- /* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
- usable_level = 1;
-
- /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
- if (pipe_wm->sprites_scaled)
- usable_level = 0;
-
- memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
- pristate, sprstate, curstate, &pipe_wm->wm[0]);
-
- if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
- return -EINVAL;
-
- ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
-
- for (level = 1; level <= usable_level; level++) {
- struct intel_wm_level *wm = &pipe_wm->wm[level];
-
- ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
- pristate, sprstate, curstate, wm);
-
- /*
- * Disable any watermark level that exceeds the
- * register maximums since such watermarks are
- * always invalid.
- */
- if (!ilk_validate_wm_level(level, &max, wm)) {
- memset(wm, 0, sizeof(*wm));
- break;
- }
- }
-
- return 0;
-}
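/*
 * A minimal sketch (illustrative helper, not part of the driver) of the
 * usable_level clamping performed above: sprite usage on ILK/SNB caps a
 * pipe at LP1, and any sprite scaling caps it at LP0, regardless of how
 * many levels the hardware supports.
 */
static int ilk_usable_wm_level_sketch(int max_level, bool ilk_or_snb,
				      bool sprites_enabled, bool sprites_scaled)
{
	if (sprites_scaled)
		return 0;		/* LP0 only */
	if (ilk_or_snb && sprites_enabled)
		return 1;		/* LP0 and LP1 only */
	return max_level;		/* all levels usable */
}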
-
-/*
- * Build a set of 'intermediate' watermark values that satisfy both the old
- * state and the new state. These can be programmed to the hardware
- * immediately.
- */
-static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
- const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- /*
- * Start with the final, target watermarks, then combine with the
- * currently active watermarks to get values that are safe both before
- * and after the vblank.
- */
- *a = new_crtc_state->wm.ilk.optimal;
- if (!new_crtc_state->hw.active ||
- intel_crtc_needs_modeset(new_crtc_state) ||
- state->skip_intermediate_wm)
- return 0;
-
- a->pipe_enabled |= b->pipe_enabled;
- a->sprites_enabled |= b->sprites_enabled;
- a->sprites_scaled |= b->sprites_scaled;
-
- for (level = 0; level <= max_level; level++) {
- struct intel_wm_level *a_wm = &a->wm[level];
- const struct intel_wm_level *b_wm = &b->wm[level];
-
- a_wm->enable &= b_wm->enable;
- a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
- a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
- a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
- a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
- }
-
- /*
- * We need to make sure that these merged watermark values are
- * actually a valid configuration themselves. If they're not,
- * there's no safe way to transition from the old state to
- * the new state, so we need to fail the atomic transaction.
- */
- if (!ilk_validate_pipe_wm(dev_priv, a))
- return -EINVAL;
-
- /*
- * If our intermediate WMs are identical to the final WMs, then we can
- * omit the post-vblank programming; only update if it's different.
- */
- if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
- new_crtc_state->wm.need_postvbl_update = true;
-
- return 0;
-}
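/*
 * Worked example with made-up numbers: if the old optimal LP1 has
 * pri_val = 40 and the new optimal LP1 has pri_val = 25, the
 * intermediate LP1 takes max(40, 25) = 40, which is safe on both sides
 * of the vblank; the post-vblank optimize step then drops it to 25.
 * A level ends up enabled in the intermediate state only when both the
 * old and new states enable it.
 */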
-
-/*
- * Merge the watermarks from all active pipes for a specific level.
- */
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
- int level,
- struct intel_wm_level *ret_wm)
-{
- const struct intel_crtc *crtc;
-
- ret_wm->enable = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
- const struct intel_wm_level *wm = &active->wm[level];
-
- if (!active->pipe_enabled)
- continue;
-
- /*
- * The watermark values may have been used in the past,
- * so we must maintain them in the registers for some
- * time even if the level is now disabled.
- */
- if (!wm->enable)
- ret_wm->enable = false;
-
- ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
- ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
- ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
- ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
- }
-}
-
-/*
- * Merge all low power watermarks for all active pipes.
- */
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
- const struct intel_wm_config *config,
- const struct ilk_wm_maximums *max,
- struct intel_pipe_wm *merged)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
- int last_enabled_level = max_level;
-
- /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
- config->num_pipes_active > 1)
- last_enabled_level = 0;
-
- /* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
-
- /* merge each WM1+ level */
- for (level = 1; level <= max_level; level++) {
- struct intel_wm_level *wm = &merged->wm[level];
-
- ilk_merge_wm_level(dev_priv, level, wm);
-
- if (level > last_enabled_level)
- wm->enable = false;
- else if (!ilk_validate_wm_level(level, max, wm))
- /* make sure all following levels get disabled */
- last_enabled_level = level - 1;
-
- /*
- * The spec says it is preferred to disable
- * FBC WMs instead of disabling a WM level.
- */
- if (wm->fbc_val > max->fbc) {
- if (wm->enable)
- merged->fbc_wm_enabled = false;
- wm->fbc_val = 0;
- }
- }
-
- /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
- if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
- for (level = 2; level <= max_level; level++) {
- struct intel_wm_level *wm = &merged->wm[level];
-
- wm->enable = false;
- }
- }
-}
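/*
 * Worked example for the cascade above (made-up values): with
 * max_level = 3, if the merged LP2 values exceed the register maximums,
 * last_enabled_level drops to 1; the following LP3 iteration then sees
 * level > last_enabled_level and disables itself, so no level above a
 * failed one stays enabled.
 */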
-
-static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
-{
- /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
- return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
-}
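/*
 * The mapping above, tabulated: the LP slots skip level 2 whenever the
 * level-4 watermark is in use.
 *
 *	wm_lp	wm[4].enable	level
 *	  1	any		1
 *	  2	false		2
 *	  2	true		3
 *	  3	false		3
 *	  3	true		4
 */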
-
-/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
- int level)
-{
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- return 2 * level;
- else
- return dev_priv->display.wm.pri_latency[level];
-}
-
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
- const struct intel_pipe_wm *merged,
- enum intel_ddb_partitioning partitioning,
- struct ilk_wm_values *results)
-{
- struct intel_crtc *crtc;
- int level, wm_lp;
-
- results->enable_fbc_wm = merged->fbc_wm_enabled;
- results->partitioning = partitioning;
-
- /* LP1+ register values */
- for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
- const struct intel_wm_level *r;
-
- level = ilk_wm_lp_to_level(wm_lp, merged);
-
- r = &merged->wm[level];
-
- /*
- * Maintain the watermark values even if the level is
- * disabled. Doing otherwise could cause underruns.
- */
- results->wm_lp[wm_lp - 1] =
- WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
- WM_LP_PRIMARY(r->pri_val) |
- WM_LP_CURSOR(r->cur_val);
-
- if (r->enable)
- results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
-
- if (DISPLAY_VER(dev_priv) >= 8)
- results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
- else
- results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
-
- results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
-
- /*
- * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
- * level is disabled. Doing otherwise could cause underruns.
- */
- if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
- drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
- results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
- }
- }
-
- /* LP0 register values */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum pipe pipe = crtc->pipe;
- const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
- const struct intel_wm_level *r = &pipe_wm->wm[0];
-
- if (drm_WARN_ON(&dev_priv->drm, !r->enable))
- continue;
-
- results->wm_pipe[pipe] =
- WM0_PIPE_PRIMARY(r->pri_val) |
- WM0_PIPE_SPRITE(r->spr_val) |
- WM0_PIPE_CURSOR(r->cur_val);
- }
-}
-
-/* Find the result with the highest level enabled. Check for fbc_wm_enabled in
- * case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
- struct intel_pipe_wm *r1,
- struct intel_pipe_wm *r2)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
- int level1 = 0, level2 = 0;
-
- for (level = 1; level <= max_level; level++) {
- if (r1->wm[level].enable)
- level1 = level;
- if (r2->wm[level].enable)
- level2 = level;
- }
-
- if (level1 == level2) {
- if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
- return r2;
- else
- return r1;
- } else if (level1 > level2) {
- return r1;
- } else {
- return r2;
- }
-}
-
-/* dirty bits used to track which watermarks need changes */
-#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
-#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
-#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
-#define WM_DIRTY_FBC (1 << 24)
-#define WM_DIRTY_DDB (1 << 25)
-
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
- const struct ilk_wm_values *old,
- const struct ilk_wm_values *new)
-{
- unsigned int dirty = 0;
- enum pipe pipe;
- int wm_lp;
-
- for_each_pipe(dev_priv, pipe) {
- if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
- dirty |= WM_DIRTY_PIPE(pipe);
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
- }
-
- if (old->enable_fbc_wm != new->enable_fbc_wm) {
- dirty |= WM_DIRTY_FBC;
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
-
- if (old->partitioning != new->partitioning) {
- dirty |= WM_DIRTY_DDB;
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
-
- /* LP1+ watermarks already deemed dirty, no need to continue */
- if (dirty & WM_DIRTY_LP_ALL)
- return dirty;
-
- /* Find the lowest numbered LP1+ watermark in need of an update... */
- for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
- if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
- old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
- break;
- }
-
- /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
- for (; wm_lp <= 3; wm_lp++)
- dirty |= WM_DIRTY_LP(wm_lp);
-
- return dirty;
-}
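/*
 * Worked example (made-up register values): if only WM2_LP differs
 * between old and new, the search loop breaks at wm_lp == 2 and the
 * tail loop marks both WM_DIRTY_LP(2) and WM_DIRTY_LP(3) dirty; once a
 * lower LP register must be rewritten, every higher one is rewritten
 * as well.
 */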
-
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
- unsigned int dirty)
-{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
- bool changed = false;
-
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
- previous->wm_lp[2] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
- changed = true;
- }
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
- previous->wm_lp[1] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
- changed = true;
- }
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
- previous->wm_lp[0] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
- changed = true;
- }
-
- /*
- * Don't touch WM_LP_SPRITE_ENABLE here.
- * Doing so could cause underruns.
- */
-
- return changed;
-}
-
-/*
- * The spec says we shouldn't write when we don't need to, because every write
- * causes WMs to be re-evaluated, expending some power.
- */
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
- struct ilk_wm_values *results)
-{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
- unsigned int dirty;
-
- dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
- if (!dirty)
- return;
-
- _ilk_disable_lp_wm(dev_priv, dirty);
-
- if (dirty & WM_DIRTY_PIPE(PIPE_A))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
- if (dirty & WM_DIRTY_PIPE(PIPE_B))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
- if (dirty & WM_DIRTY_PIPE(PIPE_C))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
-
- if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- WM_MISC_DATA_PARTITION_5_6);
- else
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- DISP_DATA_PARTITION_5_6);
- }
-
- if (dirty & WM_DIRTY_FBC)
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
- results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
-
- if (dirty & WM_DIRTY_LP(1) &&
- previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
-
- if (DISPLAY_VER(dev_priv) >= 7) {
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
- }
-
- if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
- if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
- if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
-
- dev_priv->display.wm.hw = *results;
-}
-
-bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
-{
- return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
-}
-
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
- struct intel_wm_config *config)
-{
- struct intel_crtc *crtc;
-
- /* Compute the currently _active_ config */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
-
- if (!wm->pipe_enabled)
- continue;
-
- config->sprites_enabled |= wm->sprites_enabled;
- config->sprites_scaled |= wm->sprites_scaled;
- config->num_pipes_active++;
- }
-}
-
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
-{
- struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
- struct ilk_wm_maximums max;
- struct intel_wm_config config = {};
- struct ilk_wm_values results = {};
- enum intel_ddb_partitioning partitioning;
-
- ilk_compute_wm_config(dev_priv, &config);
-
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
-
- /* 5/6 split only in single pipe config on IVB+ */
- if (DISPLAY_VER(dev_priv) >= 7 &&
- config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
-
- best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
- } else {
- best_lp_wm = &lp_wm_1_2;
- }
-
- partitioning = (best_lp_wm == &lp_wm_1_2) ?
- INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
-
- ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
-
- ilk_write_wm_values(dev_priv, &results);
-}
-
-static void ilk_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void ilk_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state->wm.need_postvbl_update)
- return;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
- struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
- struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
- enum pipe pipe = crtc->pipe;
-
- hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
-
- memset(active, 0, sizeof(*active));
-
- active->pipe_enabled = crtc->active;
-
- if (active->pipe_enabled) {
- u32 tmp = hw->wm_pipe[pipe];
-
- /*
- * For active pipes LP0 watermark is marked as
- * enabled, and LP1+ watermarks as disabled since
- * we can't really reverse compute them in case
- * multiple pipes are active.
- */
- active->wm[0].enable = true;
- active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
- active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
- active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
- } else {
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- /*
- * For inactive pipes, all watermark levels
- * should be marked as enabled but zeroed,
- * which is what we'd compute them to be.
- */
- for (level = 0; level <= max_level; level++)
- active->wm[level].enable = true;
- }
-
- crtc->wm.active.ilk = *active;
-}
-
-#define _FW_WM(value, plane) \
- (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
-#define _FW_WM_VLV(value, plane) \
- (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
-
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
- struct g4x_wm_values *wm)
-{
- u32 tmp;
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
- wm->sr.plane = _FW_WM(tmp, SR);
- wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
- wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
- wm->sr.fbc = _FW_WM(tmp, FBC_SR);
- wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
- wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
- wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
- wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
- wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
-}
-
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
- struct vlv_wm_values *wm)
-{
- enum pipe pipe;
- u32 tmp;
-
- for_each_pipe(dev_priv, pipe) {
- tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
-
- wm->ddl[pipe].plane[PLANE_PRIMARY] =
- (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].plane[PLANE_CURSOR] =
- (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].plane[PLANE_SPRITE0] =
- (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].plane[PLANE_SPRITE1] =
- (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- }
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
- wm->sr.plane = _FW_WM(tmp, SR);
- wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
- wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
- wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
- wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
-
- if (IS_CHERRYVIEW(dev_priv)) {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
- wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
- wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
- wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
- wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
- wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
- wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
- wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
- wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
- } else {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
-
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
- wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
- wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
- wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
- }
-}
-
-#undef _FW_WM
-#undef _FW_WM_VLV
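/*
 * A hedged sketch of the DSPHOWM reassembly above (the helper name is
 * illustrative): each watermark field keeps its low bits in
 * DSPFW1..DSPFW9 and gains an extra high bit from DSPHOWM, so the full
 * value is (hi << shift) | lo, with shift == 8 for the per-plane
 * fields and shift == 9 for the wider SR plane field.
 */
static u16 vlv_extend_wm_sketch(u16 lo, u16 hi, unsigned int shift)
{
	return (hi << shift) | lo;
}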
-
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
- struct intel_crtc *crtc;
-
- g4x_read_wm_values(dev_priv, wm);
-
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct g4x_wm_state *active = &crtc->wm.active.g4x;
- struct g4x_pipe_wm *raw;
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
- int level, max_level;
-
- active->cxsr = wm->cxsr;
- active->hpll_en = wm->hpll_en;
- active->fbc_en = wm->fbc_en;
-
- active->sr = wm->sr;
- active->hpll = wm->hpll;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- active->wm.plane[plane_id] =
- wm->pipe[pipe].plane[plane_id];
- }
-
- if (wm->cxsr && wm->hpll_en)
- max_level = G4X_WM_LEVEL_HPLL;
- else if (wm->cxsr)
- max_level = G4X_WM_LEVEL_SR;
- else
- max_level = G4X_WM_LEVEL_NORMAL;
-
- level = G4X_WM_LEVEL_NORMAL;
- raw = &crtc_state->wm.g4x.raw[level];
- for_each_plane_id_on_crtc(crtc, plane_id)
- raw->plane[plane_id] = active->wm.plane[plane_id];
-
- level = G4X_WM_LEVEL_SR;
- if (level > max_level)
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- raw->plane[PLANE_PRIMARY] = active->sr.plane;
- raw->plane[PLANE_CURSOR] = active->sr.cursor;
- raw->plane[PLANE_SPRITE0] = 0;
- raw->fbc = active->sr.fbc;
-
- level = G4X_WM_LEVEL_HPLL;
- if (level > max_level)
- goto out;
-
- raw = &crtc_state->wm.g4x.raw[level];
- raw->plane[PLANE_PRIMARY] = active->hpll.plane;
- raw->plane[PLANE_CURSOR] = active->hpll.cursor;
- raw->plane[PLANE_SPRITE0] = 0;
- raw->fbc = active->hpll.fbc;
-
- level++;
- out:
- for_each_plane_id_on_crtc(crtc, plane_id)
- g4x_raw_plane_wm_set(crtc_state, level,
- plane_id, USHRT_MAX);
- g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
-
- g4x_invalidate_wms(crtc, active, level);
-
- crtc_state->wm.g4x.optimal = *active;
- crtc_state->wm.g4x.intermediate = *active;
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0]);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- drm_dbg_kms(&dev_priv->drm,
- "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
- wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
- str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
- str_yes_no(wm->fbc_en));
-}
-
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
-{
- struct intel_plane *plane;
- struct intel_crtc *crtc;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
-
- for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv, plane->pipe);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- enum plane_id plane_id = plane->id;
- int level, num_levels = intel_wm_num_levels(dev_priv);
-
- if (plane_state->uapi.visible)
- continue;
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw =
- &crtc_state->wm.g4x.raw[level];
-
- raw->plane[plane_id] = 0;
-
- if (plane_id == PLANE_PRIMARY)
- raw->fbc = 0;
- }
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- int ret;
-
- ret = _g4x_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
-
- crtc_state->wm.g4x.intermediate =
- crtc_state->wm.g4x.optimal;
- crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- }
-
- g4x_program_watermarks(dev_priv);
-
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
- struct intel_crtc *crtc;
- u32 val;
-
- vlv_read_wm_values(dev_priv, wm);
-
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- wm->level = VLV_WM_LEVEL_PM2;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- if (val & DSP_MAXFIFO_PM5_ENABLE)
- wm->level = VLV_WM_LEVEL_PM5;
-
- /*
- * If DDR DVFS is disabled in the BIOS, Punit
- * will never ack the request. So if that happens,
- * assume we don't have to enable/disable DDR DVFS
- * dynamically. To test that just set the REQ_ACK
- * bit to poke the Punit, but don't change the
- * HIGH/LOW bits so that we don't actually change
- * the current state.
- */
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
- val |= FORCE_DDR_FREQ_REQ_ACK;
- vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
-
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- drm_dbg_kms(&dev_priv->drm,
- "Punit not acking DDR DVFS request, "
- "assuming DDR DVFS is disabled\n");
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5;
- } else {
- val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
- if ((val & FORCE_DDR_HIGH_FREQ) == 0)
- wm->level = VLV_WM_LEVEL_DDR_DVFS;
- }
-
- vlv_punit_put(dev_priv);
- }
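/*
 * Note on the probe above: setting only FORCE_DDR_FREQ_REQ_ACK, while
 * leaving the HIGH/LOW frequency bits untouched, pokes the Punit
 * without actually changing DDR state; if the ack bit has not cleared
 * after the short 3 ms wait, DDR DVFS is assumed to be disabled in the
 * BIOS and PM5 becomes the deepest usable level.
 */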
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct vlv_wm_state *active = &crtc->wm.active.vlv;
- const struct vlv_fifo_state *fifo_state =
- &crtc_state->wm.vlv.fifo_state;
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
- int level;
-
- vlv_get_fifo_size(crtc_state);
-
- active->num_levels = wm->level + 1;
- active->cxsr = wm->cxsr;
-
- for (level = 0; level < active->num_levels; level++) {
- struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[level];
-
- active->sr[level].plane = wm->sr.plane;
- active->sr[level].cursor = wm->sr.cursor;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- active->wm[level].plane[plane_id] =
- wm->pipe[pipe].plane[plane_id];
-
- raw->plane[plane_id] =
- vlv_invert_wm_value(active->wm[level].plane[plane_id],
- fifo_state->plane[plane_id]);
- }
- }
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- vlv_raw_plane_wm_set(crtc_state, level,
- plane_id, USHRT_MAX);
- vlv_invalidate_wms(crtc, active, level);
-
- crtc_state->wm.vlv.optimal = *active;
- crtc_state->wm.vlv.intermediate = *active;
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
- pipe_name(pipe),
- wm->pipe[pipe].plane[PLANE_PRIMARY],
- wm->pipe[pipe].plane[PLANE_CURSOR],
- wm->pipe[pipe].plane[PLANE_SPRITE0],
- wm->pipe[pipe].plane[PLANE_SPRITE1]);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
- wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
-}
-
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
-{
- struct intel_plane *plane;
- struct intel_crtc *crtc;
-
- mutex_lock(&dev_priv->display.wm.wm_mutex);
-
- for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv, plane->pipe);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- enum plane_id plane_id = plane->id;
- int level, num_levels = intel_wm_num_levels(dev_priv);
-
- if (plane_state->uapi.visible)
- continue;
-
- for (level = 0; level < num_levels; level++) {
- struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[level];
-
- raw->plane[plane_id] = 0;
- }
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- int ret;
-
- ret = _vlv_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
-
- crtc_state->wm.vlv.intermediate =
- crtc_state->wm.vlv.optimal;
- crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- }
-
- vlv_program_watermarks(dev_priv);
-
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-/*
- * FIXME should probably kill this and improve
- * the real watermark readout/sanitization instead
- */
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
-{
- intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
-
- /*
- * Don't touch WM_LP_SPRITE_ENABLE here.
- * Doing so could cause underruns.
- */
-}
-
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
- struct intel_crtc *crtc;
-
- ilk_init_lp_watermarks(dev_priv);
-
- for_each_intel_crtc(&dev_priv->drm, crtc)
- ilk_pipe_wm_get_hw_state(crtc);
-
- hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
- hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
- hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
-
- hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
- if (DISPLAY_VER(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
- hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
- }
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
- INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
- INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
- hw->enable_fbc_wm =
- !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -4282,16 +320,6 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
0, TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
-static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_LPT_LP(dev_priv)) {
- u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
-
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
- }
-}
-
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
int general_prio_credits,
int high_prio_credits)
@@ -4336,10 +364,6 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
- /* Wa_1409825376:tgl (pre-prod)*/
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
- intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, TGL_VRH_GATING_DIS);
-
/* Wa_14013723622:tgl,rkl,dg1,adl-s */
if (DISPLAY_VER(dev_priv) == 12)
intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
@@ -4357,15 +381,6 @@ static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
-static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
-{
- gen12lp_init_clock_gating(dev_priv);
-
- /* Wa_1409836686:dg1[a0] */
- if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
- intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, DPT_GATING_DIS);
-}
-
static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* Wa_22010146351:xehpsdv */
@@ -4764,12 +779,6 @@ void intel_init_clock_gating(struct drm_i915_private *dev_priv)
dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
}
-void intel_suspend_hw(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_LPT(dev_priv))
- lpt_suspend_hw(dev_priv);
-}
-
static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
drm_dbg_kms(&dev_priv->drm,
@@ -4785,7 +794,6 @@ CG_FUNCS(pvc);
CG_FUNCS(dg2);
CG_FUNCS(xehpsdv);
CG_FUNCS(adlp);
-CG_FUNCS(dg1);
CG_FUNCS(gen12lp);
CG_FUNCS(icl);
CG_FUNCS(cfl);
@@ -4820,7 +828,9 @@ CG_FUNCS(nop);
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_PONTEVECCHIO(dev_priv))
+ if (IS_METEORLAKE(dev_priv))
+ dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
+ else if (IS_PONTEVECCHIO(dev_priv))
dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs;
else if (IS_DG2(dev_priv))
dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
@@ -4828,8 +838,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
else if (IS_ALDERLAKE_P(dev_priv))
dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
- else if (IS_DG1(dev_priv))
- dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 12)
dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 11)
@@ -4875,117 +883,3 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
}
}
-
-static const struct intel_wm_funcs ilk_wm_funcs = {
- .compute_pipe_wm = ilk_compute_pipe_wm,
- .compute_intermediate_wm = ilk_compute_intermediate_wm,
- .initial_watermarks = ilk_initial_watermarks,
- .optimize_watermarks = ilk_optimize_watermarks,
-};
-
-static const struct intel_wm_funcs vlv_wm_funcs = {
- .compute_pipe_wm = vlv_compute_pipe_wm,
- .compute_intermediate_wm = vlv_compute_intermediate_wm,
- .initial_watermarks = vlv_initial_watermarks,
- .optimize_watermarks = vlv_optimize_watermarks,
- .atomic_update_watermarks = vlv_atomic_update_fifo,
-};
-
-static const struct intel_wm_funcs g4x_wm_funcs = {
- .compute_pipe_wm = g4x_compute_pipe_wm,
- .compute_intermediate_wm = g4x_compute_intermediate_wm,
- .initial_watermarks = g4x_initial_watermarks,
- .optimize_watermarks = g4x_optimize_watermarks,
-};
-
-static const struct intel_wm_funcs pnv_wm_funcs = {
- .update_wm = pnv_update_wm,
-};
-
-static const struct intel_wm_funcs i965_wm_funcs = {
- .update_wm = i965_update_wm,
-};
-
-static const struct intel_wm_funcs i9xx_wm_funcs = {
- .update_wm = i9xx_update_wm,
-};
-
-static const struct intel_wm_funcs i845_wm_funcs = {
- .update_wm = i845_update_wm,
-};
-
-static const struct intel_wm_funcs nop_funcs = {
-};
-
-/* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 9) {
- skl_wm_init(dev_priv);
- return;
- }
-
- /* For cxsr */
- if (IS_PINEVIEW(dev_priv))
- pnv_get_mem_freq(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 5)
- ilk_get_mem_freq(dev_priv);
-
- /* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_setup_wm_latency(dev_priv);
-
- if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] &&
- dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) ||
- (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] &&
- dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) {
- dev_priv->display.funcs.wm = &ilk_wm_funcs;
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.funcs.wm = &nop_funcs;
- }
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &vlv_wm_funcs;
- } else if (IS_G4X(dev_priv)) {
- g4x_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &g4x_wm_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
- if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq)) {
- drm_info(&dev_priv->drm,
- "failed to find known CxSR latency "
- "(found ddr%s fsb freq %d, mem freq %d), "
- "disabling CxSR\n",
- (dev_priv->is_ddr3 == 1) ? "3" : "2",
- dev_priv->fsb_freq, dev_priv->mem_freq);
- /* Disable CxSR and never update its watermark again */
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.funcs.wm = &nop_funcs;
- } else
- dev_priv->display.funcs.wm = &pnv_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.funcs.wm = &i965_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->display.funcs.wm = &i845_wm_funcs;
- else
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else {
- drm_err(&dev_priv->drm,
- "unexpected fall-through in %s\n", __func__);
- dev_priv->display.funcs.wm = &nop_funcs;
- }
-}
-
-void intel_pm_setup(struct drm_i915_private *dev_priv)
-{
- dev_priv->runtime_pm.suspended = false;
- atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index c09b872d65c8..f774bddcdca6 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -13,22 +13,6 @@ struct intel_crtc_state;
struct intel_plane_state;
void intel_init_clock_gating(struct drm_i915_private *dev_priv);
-void intel_suspend_hw(struct drm_i915_private *dev_priv);
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
-void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
-void intel_pm_setup(struct drm_i915_private *dev_priv);
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
-bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name, const u16 wm[]);
-
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 129746713d07..cf5122299b6b 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -652,6 +652,8 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
rpm->kdev = kdev;
rpm->available = HAS_RUNTIME_PM(i915);
+ rpm->suspended = false;
+ atomic_set(&rpm->wakeref_count, 0);
init_intel_runtime_pm_wakeref(rpm);
INIT_LIST_HEAD(&rpm->lmem_userfault_list);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8dee9e62a73e..e1e1f34490c8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -32,7 +32,6 @@
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_pm.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10
@@ -2460,7 +2459,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
{
- iounmap(regs);
+ iounmap((void __iomem *)regs);
}
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
@@ -2491,7 +2490,8 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
return -EIO;
}
- return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
+ return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
+ (void __force *)uncore->regs);
}
void intel_uncore_init_early(struct intel_uncore *uncore,
@@ -2748,14 +2748,25 @@ static void driver_initiated_flr(struct intel_uncore *uncore)
/* Trigger the actual Driver-FLR */
intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
+ /* Wait for hardware teardown to complete */
+ ret = intel_wait_for_register_fw(uncore, GU_CNTL,
+ DRIVERFLR, 0,
+ flr_timeout_ms);
+ if (ret) {
+ drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
+ return;
+ }
+
+ /* Wait for hardware/firmware re-init to complete */
ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
DRIVERFLR_STATUS, DRIVERFLR_STATUS,
flr_timeout_ms);
if (ret) {
- drm_err(&i915->drm, "wait for Driver-FLR completion failed! %d\n", ret);
+ drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
return;
}
+ /* Clear sticky completion status */
intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
}
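/*
 * To summarize the Driver-FLR handshake as implemented above: trigger
 * by setting DRIVERFLR in GU_CNTL, wait for that bit to self-clear
 * (hardware teardown done), wait for DRIVERFLR_STATUS to assert in
 * GU_DEBUG (re-init done), then write the sticky status bit back to
 * clear it. Both waits share the same flr_timeout_ms budget.
 */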
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index cfc9af8b3d21..9d4c7724e98e 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -270,6 +270,60 @@ static bool pxp_component_bound(struct intel_pxp *pxp)
return bound;
}
+static int __pxp_global_teardown_final(struct intel_pxp *pxp)
+{
+ if (!pxp->arb_is_valid)
+ return 0;
+ /*
+ * To ensure synchronous and coherent session teardown completion
+ * in response to suspend or shutdown triggers, don't use a worker.
+ */
+ intel_pxp_mark_termination_in_progress(pxp);
+ intel_pxp_terminate(pxp, false);
+
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
+{
+ if (pxp->arb_is_valid)
+ return 0;
+ /*
+ * The arb-session is currently inactive and we are doing a reset and restart
+ * due to a runtime event. Use the worker that was designed for this.
+ */
+ pxp_queue_termination(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void intel_pxp_end(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ intel_wakeref_t wakeref;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ mutex_lock(&pxp->arb_mutex);
+
+ if (__pxp_global_teardown_final(pxp))
+ drm_dbg(&i915->drm, "PXP end timed out\n");
+
+ mutex_unlock(&pxp->arb_mutex);
+
+ intel_pxp_fini_hw(pxp);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
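/*
 * Design note: intel_pxp_end() uses the synchronous "final" teardown
 * (no worker, and hw_state_invalidated stays false so nothing is
 * restarted), which suits suspend/shutdown; runtime restarts instead
 * go through __pxp_global_teardown_restart(), which queues the
 * termination worker and flags the state for an arb-session re-create.
 */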
+
/*
* the arb session is restarted from the irq work when we receive the
* termination completion interrupt
@@ -286,16 +340,9 @@ int intel_pxp_start(struct intel_pxp *pxp)
mutex_lock(&pxp->arb_mutex);
- if (pxp->arb_is_valid)
- goto unlock;
-
- pxp_queue_termination(pxp);
-
- if (!wait_for_completion_timeout(&pxp->termination,
- msecs_to_jiffies(250))) {
- ret = -ETIMEDOUT;
+ ret = __pxp_global_teardown_restart(pxp);
+ if (ret)
goto unlock;
- }
/* make sure the compiler doesn't optimize the double access */
barrier();
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
index 04440fada711..3ded0890cd27 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -24,8 +24,10 @@ void intel_pxp_init_hw(struct intel_pxp *pxp);
void intel_pxp_fini_hw(struct intel_pxp *pxp);
void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp);
+void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
int intel_pxp_start(struct intel_pxp *pxp);
+void intel_pxp_end(struct intel_pxp *pxp);
int intel_pxp_key_check(struct intel_pxp *pxp,
struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
index 739f9072fa5f..26f7d9f01bf3 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
@@ -12,6 +12,9 @@
/* PXP-Opcode for Init Session */
#define PXP42_CMDID_INIT_SESSION 0x1e
+/* PXP-Opcode for Invalidate Stream Key */
+#define PXP42_CMDID_INVALIDATE_STREAM_KEY 0x00000007
+
/* PXP-Input-Packet: Init Session (Arb-Session) */
struct pxp42_create_arb_in {
struct pxp_cmd_header header;
@@ -25,4 +28,16 @@ struct pxp42_create_arb_out {
struct pxp_cmd_header header;
} __packed;
+/* PXP-Input-Packet: Invalidate Stream Key */
+struct pxp42_inv_stream_key_in {
+ struct pxp_cmd_header header;
+ u32 rsvd[3];
+} __packed;
+
+/* PXP-Output-Packet: Invalidate Stream Key */
+struct pxp42_inv_stream_key_out {
+ struct pxp_cmd_header header;
+ u32 rsvd;
+} __packed;
+
#endif /* __INTEL_PXP_FW_INTERFACE_42_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
index aaa8187a0afb..ae9b151b7cb7 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
@@ -28,6 +28,9 @@ struct pxp_cmd_header {
union {
u32 status; /* out */
u32 stream_id; /* in */
+#define PXP_CMDHDR_EXTDATA_SESSION_VALID GENMASK(0, 0)
+#define PXP_CMDHDR_EXTDATA_APP_TYPE GENMASK(1, 1)
+#define PXP_CMDHDR_EXTDATA_SESSION_ID GENMASK(17, 2)
};
/* Length of the message (excluding the header) */
u32 buffer_len;
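/*
 * A minimal usage sketch for the extension-data fields defined above,
 * using the standard FIELD_PREP()/FIELD_GET() helpers from
 * <linux/bitfield.h>; the session id value here is made up.
 */
u32 stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1) |
		FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, 0xf);
u32 session_id = FIELD_GET(PXP_CMDHDR_EXTDATA_SESSION_ID, stream_id);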
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
index 64609d1b1c0f..23431c36b60b 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
@@ -38,7 +38,7 @@ int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp)
huc_in.header.command_id = PXP43_CMDID_START_HUC_AUTH;
huc_in.header.status = 0;
huc_in.header.buffer_len = sizeof(huc_in.huc_base_address);
- huc_in.huc_base_address = huc_phys_addr;
+ huc_in.huc_base_address = cpu_to_le64(huc_phys_addr);
err = intel_pxp_tee_stream_message(pxp, client_id, fence_id,
&huc_in, sizeof(huc_in),
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
index 892d39cc61c1..4f836b317424 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -16,7 +16,7 @@ void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
if (!intel_pxp_is_enabled(pxp))
return;
- pxp->arb_is_valid = false;
+ intel_pxp_end(pxp);
intel_pxp_invalidate(pxp);
}
@@ -34,7 +34,7 @@ void intel_pxp_suspend(struct intel_pxp *pxp)
}
}
-void intel_pxp_resume(struct intel_pxp *pxp)
+void intel_pxp_resume_complete(struct intel_pxp *pxp)
{
if (!intel_pxp_is_enabled(pxp))
return;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
index 586be769104f..06b46f535b42 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -11,7 +11,7 @@ struct intel_pxp;
#ifdef CONFIG_DRM_I915_PXP
void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
void intel_pxp_suspend(struct intel_pxp *pxp);
-void intel_pxp_resume(struct intel_pxp *pxp);
+void intel_pxp_resume_complete(struct intel_pxp *pxp);
void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
#else
static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
@@ -22,7 +22,7 @@ static inline void intel_pxp_suspend(struct intel_pxp *pxp)
{
}
-static inline void intel_pxp_resume(struct intel_pxp *pxp)
+static inline void intel_pxp_resume_complete(struct intel_pxp *pxp)
{
}
@@ -32,6 +32,6 @@ static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
#endif
static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
{
- intel_pxp_resume(pxp);
+ intel_pxp_resume_complete(pxp);
}
#endif /* __INTEL_PXP_PM_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index ae413580b81a..448cacb0465d 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -110,14 +110,16 @@ static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp)
intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1);
+ intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION);
+
return ret;
}
-static void pxp_terminate(struct intel_pxp *pxp)
+void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart)
{
int ret;
- pxp->hw_state_invalidated = true;
+ pxp->hw_state_invalidated = post_invalidation_needs_restart;
/*
* if we fail to submit the termination there is no point in waiting for
@@ -165,7 +167,7 @@ static void pxp_session_work(struct work_struct *work)
if (events & PXP_TERMINATION_REQUEST) {
events &= ~PXP_TERMINATION_COMPLETE;
- pxp_terminate(pxp);
+ intel_pxp_terminate(pxp, true);
}
if (events & PXP_TERMINATION_COMPLETE)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
index 903ac52cffa1..ba5788127109 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
@@ -12,9 +12,14 @@ struct intel_pxp;
#ifdef CONFIG_DRM_I915_PXP
void intel_pxp_session_management_init(struct intel_pxp *pxp);
+void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart);
#else
static inline void intel_pxp_session_management_init(struct intel_pxp *pxp)
{
}
+
+static inline void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart)
+{
+}
#endif
#endif /* __INTEL_PXP_SESSION_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 73aa8015f828..d9d248b48093 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -127,6 +127,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev,
intel_wakeref_t wakeref;
int ret = 0;
+ if (!HAS_HECI_PXP(i915)) {
+ pxp->dev_link = device_link_add(i915_kdev, tee_kdev, DL_FLAG_STATELESS);
+ if (drm_WARN_ON(&i915->drm, !pxp->dev_link))
+ return -ENODEV;
+ }
+
mutex_lock(&pxp->tee_mutex);
pxp->pxp_component = data;
pxp->pxp_component->tee_dev = tee_kdev;
@@ -169,6 +175,11 @@ static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
mutex_lock(&pxp->tee_mutex);
pxp->pxp_component = NULL;
mutex_unlock(&pxp->tee_mutex);
+
+ if (pxp->dev_link) {
+ device_link_del(pxp->dev_link);
+ pxp->dev_link = NULL;
+ }
}
static const struct component_ops i915_pxp_tee_component_ops = {
@@ -308,3 +319,38 @@ int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
return ret;
}
+
+void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct pxp42_inv_stream_key_in msg_in = {0};
+ struct pxp42_inv_stream_key_out msg_out = {0};
+ int ret, trials = 0;
+
+try_again:
+ memset(&msg_in, 0, sizeof(msg_in));
+ memset(&msg_out, 0, sizeof(msg_out));
+ msg_in.header.api_version = PXP_APIVER(4, 2);
+ msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+
+ msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id);
+
+ ret = intel_pxp_tee_io_message(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out),
+ NULL);
+
+	/* Keeping session cleanup coherent between the GT and firmware is critical, so try again on failure */
+ if ((ret || msg_out.header.status != 0x0) && ++trials < 3)
+ goto try_again;
+
+ if (ret)
+ drm_err(&i915->drm, "Failed to send tee msg for inv-stream-key-%d, ret=[%d]\n",
+ session_id, ret);
+ else if (msg_out.header.status != 0x0)
+ drm_warn(&i915->drm, "PXP firmware failed inv-stream-key-%d with status 0x%08x\n",
+ session_id, msg_out.header.status);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 7dc5f08d1583..007de49e1ea4 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -32,6 +32,9 @@ struct intel_pxp {
* which are protected by &tee_mutex.
*/
struct i915_pxp_component *pxp_component;
+
+ /* @dev_link: Enforce module relationship for power management ordering. */
+ struct device_link *dev_link;
/**
* @pxp_component_added: track if the pxp component has been added.
* Set and cleared in tee init and fini functions respectively.
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index aaf8a380e5c7..5aee6c9a8295 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -25,6 +25,7 @@ selftest(gt_lrc, intel_lrc_live_selftests)
selftest(gt_mocs, intel_mocs_live_selftests)
selftest(gt_pm, intel_gt_pm_live_selftests)
selftest(gt_heartbeat, intel_heartbeat_live_selftests)
+selftest(gt_tlb, intel_tlb_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(migrate, intel_migrate_live_selftests)
selftest(active, i915_active_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6fe22b096bdd..a9b79888c193 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -957,18 +957,18 @@ static int live_cancel_request(void *arg)
return 0;
}
-static struct i915_vma *empty_batch(struct drm_i915_private *i915)
+static struct i915_vma *empty_batch(struct intel_gt *gt)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *cmd;
int err;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
- cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
@@ -979,15 +979,15 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(to_gt(i915));
+ intel_gt_chipset_flush(gt);
- vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
+ vma = i915_vma_instance(obj, gt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
}
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto err;
@@ -1005,6 +1005,14 @@ err:
return ERR_PTR(err);
}
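+/* Submit @batch on @rq's engine with default (non-secure) dispatch flags. */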
+static int emit_bb_start(struct i915_request *rq, struct i915_vma *batch)
+{
+ return rq->engine->emit_bb_start(rq,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
+ 0);
+}
+
static struct i915_request *
empty_request(struct intel_engine_cs *engine,
struct i915_vma *batch)
@@ -1016,10 +1024,7 @@ empty_request(struct intel_engine_cs *engine,
if (IS_ERR(request))
return request;
- err = engine->emit_bb_start(request,
- i915_vma_offset(batch),
- i915_vma_size(batch),
- I915_DISPATCH_SECURE);
+ err = emit_bb_start(request, batch);
if (err)
goto out_request;
@@ -1034,8 +1039,7 @@ static int live_empty_request(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct igt_live_test t;
- struct i915_vma *batch;
- int err = 0;
+ int err;
/*
* Submit various sized batches of empty requests, to each engine
@@ -1043,16 +1047,17 @@ static int live_empty_request(void *arg)
* the overhead of submitting requests to the hardware.
*/
- batch = empty_batch(i915);
- if (IS_ERR(batch))
- return PTR_ERR(batch);
-
for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
struct i915_request *request;
+ struct i915_vma *batch;
unsigned long n, prime;
ktime_t times[2] = {};
+ batch = empty_batch(engine->gt);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_batch;
@@ -1100,27 +1105,29 @@ static int live_empty_request(void *arg)
engine->name,
ktime_to_ns(times[0]),
prime, div64_u64(ktime_to_ns(times[1]), prime));
+out_batch:
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+ if (err)
+ break;
}
-out_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
return err;
}
-static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
+static struct i915_vma *recursive_batch(struct intel_gt *gt)
{
struct drm_i915_gem_object *obj;
- const int ver = GRAPHICS_VER(i915);
+ const int ver = GRAPHICS_VER(gt->i915);
struct i915_vma *vma;
u32 *cmd;
int err;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, to_gt(i915)->vm, NULL);
+ vma = i915_vma_instance(obj, gt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1152,7 +1159,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(to_gt(i915));
+ intel_gt_chipset_flush(gt);
return vma;
@@ -1186,7 +1193,6 @@ static int live_all_engines(void *arg)
struct intel_engine_cs *engine;
struct i915_request **request;
struct igt_live_test t;
- struct i915_vma *batch;
unsigned int idx;
int err;
@@ -1204,42 +1210,44 @@ static int live_all_engines(void *arg)
if (err)
goto out_free;
- batch = recursive_batch(i915);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
- goto out_free;
- }
-
- i915_vma_lock(batch);
-
idx = 0;
for_each_uabi_engine(engine, i915) {
+ struct i915_vma *batch;
+
+ batch = recursive_batch(engine->gt);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ pr_err("%s: Unable to create batch, err=%d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ i915_vma_lock(batch);
request[idx] = intel_engine_create_kernel_request(engine);
if (IS_ERR(request[idx])) {
err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed with err=%d\n",
__func__, err);
- goto out_request;
+ goto out_unlock;
}
+ GEM_BUG_ON(request[idx]->context->vm != batch->vm);
err = i915_vma_move_to_active(batch, request[idx], 0);
GEM_BUG_ON(err);
- err = engine->emit_bb_start(request[idx],
- i915_vma_offset(batch),
- i915_vma_size(batch),
- 0);
+ err = emit_bb_start(request[idx], batch);
GEM_BUG_ON(err);
request[idx]->batch = batch;
i915_request_get(request[idx]);
i915_request_add(request[idx]);
idx++;
+out_unlock:
+ i915_vma_unlock(batch);
+ if (err)
+ goto out_request;
}
- i915_vma_unlock(batch);
-
idx = 0;
for_each_uabi_engine(engine, i915) {
if (i915_request_completed(request[idx])) {
@@ -1251,17 +1259,23 @@ static int live_all_engines(void *arg)
idx++;
}
- err = recursive_batch_resolve(batch);
- if (err) {
- pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
- goto out_request;
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ err = recursive_batch_resolve(request[idx]->batch);
+ if (err) {
+ pr_err("%s: failed to resolve batch, err=%d\n",
+ __func__, err);
+ goto out_request;
+ }
+ idx++;
}
idx = 0;
for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq = request[idx];
long timeout;
- timeout = i915_request_wait(request[idx], 0,
+ timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -1270,8 +1284,10 @@ static int live_all_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[idx]));
- i915_request_put(request[idx]);
+ GEM_BUG_ON(!i915_request_completed(rq));
+ i915_vma_unpin(rq->batch);
+ i915_vma_put(rq->batch);
+ i915_request_put(rq);
request[idx] = NULL;
idx++;
}
@@ -1281,12 +1297,18 @@ static int live_all_engines(void *arg)
out_request:
idx = 0;
for_each_uabi_engine(engine, i915) {
- if (request[idx])
- i915_request_put(request[idx]);
+ struct i915_request *rq = request[idx];
+
+ if (!rq)
+ continue;
+
+ if (rq->batch) {
+ i915_vma_unpin(rq->batch);
+ i915_vma_put(rq->batch);
+ }
+ i915_request_put(rq);
idx++;
}
- i915_vma_unpin(batch);
- i915_vma_put(batch);
out_free:
kfree(request);
return err;
@@ -1322,7 +1344,7 @@ static int live_sequential_engines(void *arg)
for_each_uabi_engine(engine, i915) {
struct i915_vma *batch;
- batch = recursive_batch(i915);
+ batch = recursive_batch(engine->gt);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch for %s, err=%d\n",
@@ -1338,6 +1360,7 @@ static int live_sequential_engines(void *arg)
__func__, engine->name, err);
goto out_unlock;
}
+ GEM_BUG_ON(request[idx]->context->vm != batch->vm);
if (prev) {
err = i915_request_await_dma_fence(request[idx],
@@ -1353,10 +1376,7 @@ static int live_sequential_engines(void *arg)
err = i915_vma_move_to_active(batch, request[idx], 0);
GEM_BUG_ON(err);
- err = engine->emit_bb_start(request[idx],
- i915_vma_offset(batch),
- i915_vma_size(batch),
- 0);
+ err = emit_bb_start(request[idx], batch);
GEM_BUG_ON(err);
request[idx]->batch = batch;
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index bba8cb6e8ae4..9f0651d48d41 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -10,6 +10,7 @@
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "vlv_sideband.h"
struct dram_dimm_info {
u16 size;
@@ -42,6 +43,155 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
#undef DRAM_TYPE_STR
+static void pnv_detect_mem_freq(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* Detect the Pineview DDR3 setting */
+ tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void ilk_detect_mem_freq(struct drm_i915_private *dev_priv)
+{
+ u16 ddrpll, csipll;
+
+ ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+}
+
+static void chv_detect_mem_freq(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
+ val = vlv_cck_read(i915, CCK_FUSE_REG);
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
+
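+ /* CCK fuse bits 4:2 select the memory frequency: only value 3 means 2000, anything else 1600. */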
+ switch ((val >> 2) & 0x7) {
+ case 3:
+ i915->mem_freq = 2000;
+ break;
+ default:
+ i915->mem_freq = 1600;
+ break;
+ }
+}
+
+static void vlv_detect_mem_freq(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
+
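+ /* Bits 7:6 of the Punit GPU frequency status encode the DDR speed. */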
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ i915->mem_freq = 800;
+ break;
+ case 2:
+ i915->mem_freq = 1066;
+ break;
+ case 3:
+ i915->mem_freq = 1333;
+ break;
+ }
+}
+
+static void detect_mem_freq(struct drm_i915_private *i915)
+{
+ if (IS_PINEVIEW(i915))
+ pnv_detect_mem_freq(i915);
+ else if (GRAPHICS_VER(i915) == 5)
+ ilk_detect_mem_freq(i915);
+ else if (IS_CHERRYVIEW(i915))
+ chv_detect_mem_freq(i915);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_detect_mem_freq(i915);
+
+ if (i915->mem_freq)
+ drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
+}
+
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
return dimm->ranks * 64 / (dimm->width ?: 1);
@@ -507,6 +657,8 @@ void intel_dram_detect(struct drm_i915_private *i915)
struct dram_info *dram_info = &i915->dram_info;
int ret;
+ detect_mem_freq(i915);
+
if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
return;
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index e5749927fd6c..03535a15dd8f 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -2,3 +2,4 @@
source "drivers/gpu/drm/imx/dcss/Kconfig"
source "drivers/gpu/drm/imx/ipuv3/Kconfig"
+source "drivers/gpu/drm/imx/lcdc/Kconfig"
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index 909622864716..86f38e7c7422 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
obj-$(CONFIG_DRM_IMX) += ipuv3/
+obj-$(CONFIG_DRM_IMX_LCDC) += lcdc/
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index dab5e664920d..896de946f8df 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -7,7 +7,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
@@ -145,7 +145,7 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
if (ret)
goto cleanup_crtc;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
return kms;
diff --git a/drivers/gpu/drm/imx/ipuv3/Kconfig b/drivers/gpu/drm/imx/ipuv3/Kconfig
index bb278a369575..bacf0655ebaf 100644
--- a/drivers/gpu/drm/imx/ipuv3/Kconfig
+++ b/drivers/gpu/drm/imx/ipuv3/Kconfig
@@ -4,7 +4,7 @@ config DRM_IMX
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select DRM_GEM_DMA_HELPER
- depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
+ depends on DRM && (ARCH_MXC || COMPILE_TEST)
depends on IMX_IPUV3_CORE
help
enable i.MX graphics support
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
index e060fa6cbcb9..4a866ac60fff 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
@@ -249,7 +249,7 @@ static int imx_drm_bind(struct device *dev)
if (ret)
goto err_poll_fini;
- drm_fbdev_generic_setup(drm, legacyfb_depth);
+ drm_fbdev_dma_setup(drm, legacyfb_depth);
return 0;
diff --git a/drivers/gpu/drm/imx/lcdc/Kconfig b/drivers/gpu/drm/imx/lcdc/Kconfig
new file mode 100644
index 000000000000..7e57922bbd9d
--- /dev/null
+++ b/drivers/gpu/drm/imx/lcdc/Kconfig
@@ -0,0 +1,7 @@
+config DRM_IMX_LCDC
+ tristate "Freescale i.MX LCDC displays"
+ depends on DRM && (ARCH_MXC || COMPILE_TEST)
+ select DRM_GEM_DMA_HELPER
+ select DRM_KMS_HELPER
+ help
+ Enable support for the LCD controller (LCDC) found on i.MX1, i.MX21, i.MX25 and i.MX27.
diff --git a/drivers/gpu/drm/imx/lcdc/Makefile b/drivers/gpu/drm/imx/lcdc/Makefile
new file mode 100644
index 000000000000..e84daa432c2e
--- /dev/null
+++ b/drivers/gpu/drm/imx/lcdc/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DRM_IMX_LCDC) += imx-lcdc.o
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
new file mode 100644
index 000000000000..8e6d457917da
--- /dev/null
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -0,0 +1,546 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: 2020 Marian Cichy <M.Cichy@pengutronix.de>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define IMX21LCDC_LSSAR 0x0000 /* LCDC Screen Start Address Register */
+#define IMX21LCDC_LSR 0x0004 /* LCDC Size Register */
+#define IMX21LCDC_LVPWR 0x0008 /* LCDC Virtual Page Width Register */
+#define IMX21LCDC_LCPR 0x000C /* LCDC Cursor Position Register */
+#define IMX21LCDC_LCWHB 0x0010 /* LCDC Cursor Width, Height and Blink Register */
+#define IMX21LCDC_LCCMR 0x0014 /* LCDC Color Cursor Mapping Register */
+#define IMX21LCDC_LPCR 0x0018 /* LCDC Panel Configuration Register */
+#define IMX21LCDC_LHCR 0x001C /* LCDC Horizontal Configuration Register */
+#define IMX21LCDC_LVCR 0x0020 /* LCDC Vertical Configuration Register */
+#define IMX21LCDC_LPOR 0x0024 /* LCDC Panning Offset Register */
+#define IMX21LCDC_LSCR 0x0028 /* LCDC Sharp Configuration Register */
+#define IMX21LCDC_LPCCR 0x002C /* LCDC PWM Contrast Control Register */
+#define IMX21LCDC_LDCR 0x0030 /* LCDC DMA Control Register */
+#define IMX21LCDC_LRMCR 0x0034 /* LCDC Refresh Mode Control Register */
+#define IMX21LCDC_LICR 0x0038 /* LCDC Interrupt Configuration Register */
+#define IMX21LCDC_LIER 0x003C /* LCDC Interrupt Enable Register */
+#define IMX21LCDC_LISR 0x0040 /* LCDC Interrupt Status Register */
+#define IMX21LCDC_LGWSAR 0x0050 /* LCDC Graphic Window Start Address Register */
+#define IMX21LCDC_LGWSR 0x0054 /* LCDC Graphic Window Size Register */
+#define IMX21LCDC_LGWVPWR 0x0058 /* LCDC Graphic Window Virtual Page Width Register */
+#define IMX21LCDC_LGWPOR 0x005C /* LCDC Graphic Window Panning Offset Register */
+#define IMX21LCDC_LGWPR 0x0060 /* LCDC Graphic Window Position Register */
+#define IMX21LCDC_LGWCR 0x0064 /* LCDC Graphic Window Control Register */
+#define IMX21LCDC_LGWDCR 0x0068 /* LCDC Graphic Window DMA Control Register */
+#define IMX21LCDC_LAUSCR 0x0080 /* LCDC AUS Mode Control Register */
+#define IMX21LCDC_LAUSCCR 0x0084 /* LCDC AUS Mode Cursor Control Register */
+#define IMX21LCDC_BGLUT 0x0800 /* Background Lookup Table */
+#define IMX21LCDC_GWLUT 0x0C00 /* Graphic Window Lookup Table */
+
+#define IMX21LCDC_LCPR_CC0 BIT(30) /* Cursor Control Bit 0 */
+#define IMX21LCDC_LCPR_CC1 BIT(31) /* Cursor Control Bit 1 */
+
+/* Values for the HSYNC, VSYNC and Framesize registers */
+#define IMX21LCDC_LHCR_HWIDTH GENMASK(31, 26)
+#define IMX21LCDC_LHCR_HFPORCH GENMASK(15, 8) /* H_WAIT_1 in the i.MX25 Reference manual */
+#define IMX21LCDC_LHCR_HBPORCH GENMASK(7, 0) /* H_WAIT_2 in the i.MX25 Reference manual */
+
+#define IMX21LCDC_LVCR_VWIDTH GENMASK(31, 26)
+#define IMX21LCDC_LVCR_VFPORCH GENMASK(15, 8) /* V_WAIT_1 in the i.MX25 Reference manual */
+#define IMX21LCDC_LVCR_VBPORCH GENMASK(7, 0) /* V_WAIT_2 in the i.MX25 Reference manual */
+
+#define IMX21LCDC_LSR_XMAX GENMASK(25, 20)
+#define IMX21LCDC_LSR_YMAX GENMASK(9, 0)
+
+/* Values for LPCR Register */
+#define IMX21LCDC_LPCR_PCD GENMASK(5, 0)
+#define IMX21LCDC_LPCR_SHARP BIT(6)
+#define IMX21LCDC_LPCR_SCLKSEL BIT(7)
+#define IMX21LCDC_LPCR_ACD GENMASK(14, 8)
+#define IMX21LCDC_LPCR_ACDSEL BIT(15)
+#define IMX21LCDC_LPCR_REV_VS BIT(16)
+#define IMX21LCDC_LPCR_SWAP_SEL BIT(17)
+#define IMX21LCDC_LPCR_END_SEL BIT(18)
+#define IMX21LCDC_LPCR_SCLKIDLE BIT(19)
+#define IMX21LCDC_LPCR_OEPOL BIT(20)
+#define IMX21LCDC_LPCR_CLKPOL BIT(21)
+#define IMX21LCDC_LPCR_LPPOL BIT(22)
+#define IMX21LCDC_LPCR_FLMPOL BIT(23)
+#define IMX21LCDC_LPCR_PIXPOL BIT(24)
+#define IMX21LCDC_LPCR_BPIX GENMASK(27, 25)
+#define IMX21LCDC_LPCR_PBSIZ GENMASK(29, 28)
+#define IMX21LCDC_LPCR_COLOR BIT(30)
+#define IMX21LCDC_LPCR_TFT BIT(31)
+
+#define INTR_EOF BIT(1) /* VBLANK Interrupt Bit */
+
+#define BPP_RGB565 0x05
+#define BPP_XRGB8888 0x07
+
+#define LCDC_MIN_XRES 64
+#define LCDC_MIN_YRES 64
+
+#define LCDC_MAX_XRES 1024
+#define LCDC_MAX_YRES 1024
+
+struct imx_lcdc {
+ struct drm_device drm;
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector *connector;
+ void __iomem *base;
+
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_per;
+};
+
+static const u32 imx_lcdc_formats[] = {
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+};
+
+static inline struct imx_lcdc *imx_lcdc_from_drmdev(struct drm_device *drm)
+{
+ return container_of(drm, struct imx_lcdc, drm);
+}
+
+static unsigned int imx_lcdc_get_format(unsigned int drm_format)
+{
+ switch (drm_format) {
+ default:
+ DRM_WARN("Format not supported - fallback to XRGB8888\n");
+ fallthrough;
+
+ case DRM_FORMAT_XRGB8888:
+ return BPP_XRGB8888;
+
+ case DRM_FORMAT_RGB565:
+ return BPP_RGB565;
+ }
+}
+
+static void imx_lcdc_update_hw_registers(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state,
+ bool mode_set)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane_state *new_state = pipe->plane.state;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(pipe->crtc.dev);
+ u32 lpcr, lvcr, lhcr;
+ u32 framesize;
+ dma_addr_t addr;
+
+ addr = drm_fb_dma_get_gem_addr(fb, new_state, 0);
+ /* The LSSAR register specifies the LCD screen start address (SSA). */
+ writel(addr, lcdc->base + IMX21LCDC_LSSAR);
+
+ if (!mode_set)
+ return;
+
+ /* Disable the PER clock to make register writes possible */
+ if (old_state && old_state->crtc && old_state->crtc->enabled)
+ clk_disable_unprepare(lcdc->clk_per);
+
+ /* Framesize */
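+ /* XMAX is stored in units of 16 pixels (hence the >> 4); YMAX is in lines. */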
+ framesize = FIELD_PREP(IMX21LCDC_LSR_XMAX, crtc->mode.hdisplay >> 4) |
+ FIELD_PREP(IMX21LCDC_LSR_YMAX, crtc->mode.vdisplay);
+ writel(framesize, lcdc->base + IMX21LCDC_LSR);
+
+ /* HSYNC */
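+ /* Porch and sync widths are programmed minus fixed offsets (-1, -1, -3); see the H_WAIT_* notes above. */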
+ lhcr = FIELD_PREP(IMX21LCDC_LHCR_HFPORCH, crtc->mode.hsync_start - crtc->mode.hdisplay - 1) |
+ FIELD_PREP(IMX21LCDC_LHCR_HWIDTH, crtc->mode.hsync_end - crtc->mode.hsync_start - 1) |
+ FIELD_PREP(IMX21LCDC_LHCR_HBPORCH, crtc->mode.htotal - crtc->mode.hsync_end - 3);
+ writel(lhcr, lcdc->base + IMX21LCDC_LHCR);
+
+ /* VSYNC */
+ lvcr = FIELD_PREP(IMX21LCDC_LVCR_VFPORCH, crtc->mode.vsync_start - crtc->mode.vdisplay) |
+ FIELD_PREP(IMX21LCDC_LVCR_VWIDTH, crtc->mode.vsync_end - crtc->mode.vsync_start) |
+ FIELD_PREP(IMX21LCDC_LVCR_VBPORCH, crtc->mode.vtotal - crtc->mode.vsync_end);
+ writel(lvcr, lcdc->base + IMX21LCDC_LVCR);
+
+ lpcr = readl(lcdc->base + IMX21LCDC_LPCR);
+ lpcr &= ~IMX21LCDC_LPCR_BPIX;
+ lpcr |= FIELD_PREP(IMX21LCDC_LPCR_BPIX, imx_lcdc_get_format(fb->format->format));
+ writel(lpcr, lcdc->base + IMX21LCDC_LPCR);
+
+ /* Virtual Page Width */
+ writel(new_state->fb->pitches[0] / 4, lcdc->base + IMX21LCDC_LVPWR);
+
+ /* Enable PER clock */
+ if (new_state->crtc->enabled)
+ clk_prepare_enable(lcdc->clk_per);
+}
+
+static void imx_lcdc_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ int ret;
+ int clk_div;
+ int bpp;
+ struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(pipe->crtc.dev);
+ struct drm_display_mode *mode = &pipe->crtc.mode;
+ struct drm_display_info *disp_info = &lcdc->connector->display_info;
+ const int hsync_pol = (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : 1;
+ const int vsync_pol = (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : 1;
+ const int data_enable_pol =
+ (disp_info->bus_flags & DRM_BUS_FLAG_DE_HIGH) ? 0 : 1;
+ const int clk_pol =
+ (disp_info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) ? 0 : 1;
+
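+ /* The PCD field holds the pixel clock divider minus one, so program clk_div - 1. */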
+ clk_div = DIV_ROUND_CLOSEST_ULL(clk_get_rate(lcdc->clk_per),
+ mode->clock * 1000);
+ bpp = imx_lcdc_get_format(plane_state->fb->format->format);
+
+ writel(FIELD_PREP(IMX21LCDC_LPCR_PCD, clk_div - 1) |
+ FIELD_PREP(IMX21LCDC_LPCR_LPPOL, hsync_pol) |
+ FIELD_PREP(IMX21LCDC_LPCR_FLMPOL, vsync_pol) |
+ FIELD_PREP(IMX21LCDC_LPCR_OEPOL, data_enable_pol) |
+ FIELD_PREP(IMX21LCDC_LPCR_TFT, 1) |
+ FIELD_PREP(IMX21LCDC_LPCR_COLOR, 1) |
+ FIELD_PREP(IMX21LCDC_LPCR_PBSIZ, 3) |
+ FIELD_PREP(IMX21LCDC_LPCR_BPIX, bpp) |
+ FIELD_PREP(IMX21LCDC_LPCR_SCLKSEL, 1) |
+ FIELD_PREP(IMX21LCDC_LPCR_PIXPOL, 0) |
+ FIELD_PREP(IMX21LCDC_LPCR_CLKPOL, clk_pol),
+ lcdc->base + IMX21LCDC_LPCR);
+
+ /* 0px panning offset */
+ writel(0x00000000, lcdc->base + IMX21LCDC_LPOR);
+
+ /* disable hardware cursor */
+ writel(readl(lcdc->base + IMX21LCDC_LCPR) & ~(IMX21LCDC_LCPR_CC0 | IMX21LCDC_LCPR_CC1),
+ lcdc->base + IMX21LCDC_LCPR);
+
+ ret = clk_prepare_enable(lcdc->clk_ipg);
+ if (ret) {
+ dev_err(pipe->crtc.dev->dev, "Cannot enable ipg clock: %pe\n", ERR_PTR(ret));
+ return;
+ }
+ ret = clk_prepare_enable(lcdc->clk_ahb);
+ if (ret) {
+ dev_err(pipe->crtc.dev->dev, "Cannot enable ahb clock: %pe\n", ERR_PTR(ret));
+
+ clk_disable_unprepare(lcdc->clk_ipg);
+
+ return;
+ }
+
+ imx_lcdc_update_hw_registers(pipe, NULL, true);
+
+ /* Enable VBLANK Interrupt */
+ writel(INTR_EOF, lcdc->base + IMX21LCDC_LIER);
+}
+
+static void imx_lcdc_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(pipe->crtc.dev);
+ struct drm_crtc *crtc = &lcdc->pipe.crtc;
+ struct drm_pending_vblank_event *event;
+
+ clk_disable_unprepare(lcdc->clk_ahb);
+ clk_disable_unprepare(lcdc->clk_ipg);
+
+ if (pipe->crtc.enabled)
+ clk_disable_unprepare(lcdc->clk_per);
+
+ spin_lock_irq(&lcdc->drm.event_lock);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irq(&lcdc->drm.event_lock);
+
+ /* Disable VBLANK Interrupt */
+ writel(0, lcdc->base + IMX21LCDC_LIER);
+}
+
+static int imx_lcdc_pipe_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ const struct drm_display_mode *old_mode = &pipe->crtc.state->mode;
+
+ if (mode->hdisplay < LCDC_MIN_XRES || mode->hdisplay > LCDC_MAX_XRES ||
+ mode->vdisplay < LCDC_MIN_YRES || mode->vdisplay > LCDC_MAX_YRES ||
+ mode->hdisplay % 0x10) { /* must be multiple of 16 */
+ drm_err(pipe->crtc.dev, "unsupported display mode (%u x %u)\n",
+ mode->hdisplay, mode->vdisplay);
+ return -EINVAL;
+ }
+
+ crtc_state->mode_changed =
+ old_mode->hdisplay != mode->hdisplay ||
+ old_mode->vdisplay != mode->vdisplay;
+
+ return 0;
+}
+
+static void imx_lcdc_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_pending_vblank_event *event = crtc->state->event;
+ struct drm_plane_state *new_state = pipe->plane.state;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
+ struct drm_crtc *old_crtc = old_state->crtc;
+ bool mode_changed = false;
+
+ if (old_fb && old_fb->format != fb->format)
+ mode_changed = true;
+ else if (old_crtc != crtc)
+ mode_changed = true;
+
+ imx_lcdc_update_hw_registers(pipe, old_state, mode_changed);
+
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+
+ if (crtc->state->active && drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+}
+
+static const struct drm_simple_display_pipe_funcs imx_lcdc_pipe_funcs = {
+ .enable = imx_lcdc_pipe_enable,
+ .disable = imx_lcdc_pipe_disable,
+ .check = imx_lcdc_pipe_check,
+ .update = imx_lcdc_pipe_update,
+};
+
+static const struct drm_mode_config_funcs imx_lcdc_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs imx_lcdc_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static void imx_lcdc_release(struct drm_device *drm)
+{
+ struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(drm);
+
+ drm_kms_helper_poll_fini(drm);
+ kfree(lcdc);
+}
+
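+/* Standard file operations for DMA-backed GEM objects. */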
+DEFINE_DRM_GEM_DMA_FOPS(imx_lcdc_drm_fops);
+
+static struct drm_driver imx_lcdc_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &imx_lcdc_drm_fops,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ .release = imx_lcdc_release,
+ .name = "imx-lcdc",
+ .desc = "i.MX LCDC driver",
+ .date = "20200716",
+};
+
+static const struct of_device_id imx_lcdc_of_dev_id[] = {
+ {
+ .compatible = "fsl,imx21-lcdc",
+ },
+ {
+ .compatible = "fsl,imx25-lcdc",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_lcdc_of_dev_id);
+
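+/* Interrupt handler: forward end-of-frame (vblank) events to the DRM core. */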
+static irqreturn_t imx_lcdc_irq_handler(int irq, void *arg)
+{
+ struct imx_lcdc *lcdc = arg;
+ struct drm_crtc *crtc = &lcdc->pipe.crtc;
+ unsigned int status;
+
+ status = readl(lcdc->base + IMX21LCDC_LISR);
+
+ if (status & INTR_EOF) {
+ drm_crtc_handle_vblank(crtc);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int imx_lcdc_probe(struct platform_device *pdev)
+{
+ struct imx_lcdc *lcdc;
+ struct drm_device *drm;
+ struct drm_bridge *bridge;
+ int irq;
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ lcdc = devm_drm_dev_alloc(dev, &imx_lcdc_drm_driver,
+ struct imx_lcdc, drm);
+ if (!lcdc)
+ return -ENOMEM;
+
+ drm = &lcdc->drm;
+
+ lcdc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(lcdc->base))
+ return dev_err_probe(dev, PTR_ERR(lcdc->base), "Cannot get IO memory\n");
+
+ bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(bridge))
+ return dev_err_probe(dev, PTR_ERR(bridge), "Failed to find bridge\n");
+
+ /* Get Clocks */
+ lcdc->clk_ipg = devm_clk_get(dev, "ipg");
+ if (IS_ERR(lcdc->clk_ipg))
+ return dev_err_probe(dev, PTR_ERR(lcdc->clk_ipg), "Failed to get %s clk\n", "ipg");
+
+ lcdc->clk_ahb = devm_clk_get(dev, "ahb");
+ if (IS_ERR(lcdc->clk_ahb))
+ return dev_err_probe(dev, PTR_ERR(lcdc->clk_ahb), "Failed to get %s clk\n", "ahb");
+
+ lcdc->clk_per = devm_clk_get(dev, "per");
+ if (IS_ERR(lcdc->clk_per))
+ return dev_err_probe(dev, PTR_ERR(lcdc->clk_per), "Failed to get %s clk\n", "per");
+
+ ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot set DMA Mask\n");
+
+ /* Modeset init */
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot initialize mode configuration structure\n");
+
+ /* CRTC, Plane, Encoder */
+ ret = drm_simple_display_pipe_init(drm, &lcdc->pipe,
+ &imx_lcdc_pipe_funcs,
+ imx_lcdc_formats,
+ ARRAY_SIZE(imx_lcdc_formats), NULL, NULL);
+ if (ret < 0)
+ return dev_err_probe(drm->dev, ret, "Cannot setup simple display pipe\n");
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0)
+ return dev_err_probe(drm->dev, ret, "Failed to initialize vblank\n");
+
+ ret = drm_bridge_attach(&lcdc->pipe.encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return dev_err_probe(drm->dev, ret, "Cannot attach bridge\n");
+
+ lcdc->connector = drm_bridge_connector_init(drm, &lcdc->pipe.encoder);
+ if (IS_ERR(lcdc->connector))
+ return dev_err_probe(drm->dev, PTR_ERR(lcdc->connector), "Cannot init bridge connector\n");
+
+ drm_connector_attach_encoder(lcdc->connector, &lcdc->pipe.encoder);
+
+ /*
+ * The LCDC controller does not have an enable bit; it starts
+ * running as soon as its clocks are enabled. If the clocks are
+ * enabled while the controller has not yet been programmed with
+ * proper register values (e.g. left enabled by the bootloader),
+ * it ends up in an undefined state.
+ * To avoid this, briefly enable and disable the LCDC IPG, PER
+ * and AHB clocks to force a kind of 'reset' of the LCDC block.
+ */
+
+ ret = clk_prepare_enable(lcdc->clk_ipg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable ipg clock\n");
+ clk_disable_unprepare(lcdc->clk_ipg);
+
+ ret = clk_prepare_enable(lcdc->clk_per);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable per clock\n");
+ clk_disable_unprepare(lcdc->clk_per);
+
+ ret = clk_prepare_enable(lcdc->clk_ahb);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable ahb clock\n");
+ clk_disable_unprepare(lcdc->clk_ahb);
+
+ drm->mode_config.min_width = LCDC_MIN_XRES;
+ drm->mode_config.max_width = LCDC_MAX_XRES;
+ drm->mode_config.min_height = LCDC_MIN_YRES;
+ drm->mode_config.max_height = LCDC_MAX_YRES;
+ drm->mode_config.preferred_depth = 16;
+ drm->mode_config.funcs = &imx_lcdc_mode_config_funcs;
+ drm->mode_config.helper_private = &imx_lcdc_mode_config_helpers;
+
+ drm_mode_config_reset(drm);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, imx_lcdc_irq_handler, 0, "imx-lcdc", lcdc);
+ if (ret < 0)
+ return dev_err_probe(drm->dev, ret, "Failed to install IRQ handler\n");
+
+ platform_set_drvdata(pdev, drm);
+
+ ret = drm_dev_register(&lcdc->drm, 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot register device\n");
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
+}
+
+static int imx_lcdc_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
+
+static void imx_lcdc_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
+static struct platform_driver imx_lcdc_driver = {
+ .driver = {
+ .name = "imx-lcdc",
+ .of_match_table = imx_lcdc_of_dev_id,
+ },
+ .probe = imx_lcdc_probe,
+ .remove = imx_lcdc_remove,
+ .shutdown = imx_lcdc_shutdown,
+};
+module_platform_driver(imx_lcdc_driver);
+
+MODULE_AUTHOR("Marian Cichy <M.Cichy@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale i.MX LCDC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index d29c678f6c91..24035b53441c 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -15,7 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -562,7 +562,7 @@ static int kmb_probe(struct platform_device *pdev)
if (ret)
goto err_register;
- drm_fbdev_generic_setup(&kmb->drm, 0);
+ drm_fbdev_dma_setup(&kmb->drm, 0);
return 0;
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 0f1ca0b0db49..10252dc11a22 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -277,21 +277,13 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
int i, err;
for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
- struct dma_fence *fence = NULL;
-
if (!submit->in_sync[i])
continue;
- err = drm_syncobj_find_fence(file, submit->in_sync[i],
- 0, 0, &fence);
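+ /* drm_sched_job_add_syncobj_dependency() looks up the syncobj's fence and registers it as a scheduler dependency in one step. */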
+ err = drm_sched_job_add_syncobj_dependency(&submit->task->base, file,
+ submit->in_sync[i], 0);
if (err)
return err;
-
- err = drm_sched_job_add_dependency(&submit->task->base, fence);
- if (err) {
- dma_fence_put(fence);
- return err;
- }
}
return 0;
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index 2fb23697740a..c35c453fd025 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -17,7 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
@@ -449,7 +449,7 @@ static int logicvc_drm_probe(struct platform_device *pdev)
preferred_bpp = 32;
break;
}
- drm_fbdev_generic_setup(drm_dev, preferred_bpp);
+ drm_fbdev_dma_setup(drm_dev, preferred_bpp);
return 0;
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 4aedb050d2a5..a8cd86c06c14 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -69,7 +69,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -94,7 +94,7 @@
#define MCDE_PID_MAJOR_VERSION_MASK 0xFF000000
static const struct drm_mode_config_funcs mcde_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
+ .fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -237,7 +237,7 @@ static int mcde_drm_bind(struct device *dev)
if (ret < 0)
goto unbind;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
return 0;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 79bfe3938d3c..6608a251106b 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -18,7 +18,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -353,7 +353,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto uninstall_irq;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
return 0;
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 3c55ed003359..fcd532db19c1 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -866,10 +866,10 @@ meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))
return MODE_BAD;
- if (mode->hdisplay < 640 || mode->hdisplay > 1920)
+ if (mode->hdisplay < 400 || mode->hdisplay > 1920)
return MODE_BAD_HVALUE;
- if (mode->vdisplay < 480 || mode->vdisplay > 1200)
+ if (mode->vdisplay < 480 || mode->vdisplay > 1920)
return MODE_BAD_VVALUE;
return MODE_OK;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 9e604dbb8e44..57c7edcab602 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -375,12 +375,15 @@ int mgag200_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state);
void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *old_state);
+void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane,
+ struct drm_atomic_state *state);
void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *old_state);
#define MGAG200_PRIMARY_PLANE_HELPER_FUNCS \
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
.atomic_check = mgag200_primary_plane_helper_atomic_check, \
.atomic_update = mgag200_primary_plane_helper_atomic_update, \
+ .atomic_enable = mgag200_primary_plane_helper_atomic_enable, \
.atomic_disable = mgag200_primary_plane_helper_atomic_disable
#define MGAG200_PRIMARY_PLANE_FUNCS \
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 0a5aaf78172a..0f2dd26755df 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -501,10 +501,6 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_framebuffer *fb = plane_state->fb;
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
- u8 seq1;
-
- if (!fb)
- return;
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
@@ -514,13 +510,19 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
/* Always scanout image at VRAM offset 0 */
mgag200_set_startadd(mdev, (u32)0);
mgag200_set_offset(mdev, fb);
+}
- if (!old_plane_state->crtc && plane_state->crtc) { // enabling
- RREG_SEQ(0x01, seq1);
- seq1 &= ~MGAREG_SEQ1_SCROFF;
- WREG_SEQ(0x01, seq1);
- msleep(20);
- }
+void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ u8 seq1;
+
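+ /* Clearing SCROFF in SEQ1 turns scanout back on; allow a brief delay for it to take effect. */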
+ RREG_SEQ(0x01, seq1);
+ seq1 &= ~MGAREG_SEQ1_SCROFF;
+ WREG_SEQ(0x01, seq1);
+ msleep(20);
}
void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index cc2ceb301b96..6fb5b469ee5a 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
@@ -220,7 +220,7 @@ static int lcdif_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
return 0;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index b3ab86ad1b36..368b1fbd8305 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -20,7 +20,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -365,7 +365,7 @@ static int mxsfb_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 288eebc70a67..c2ec91cc845d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1015,9 +1015,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
if (ret)
goto out_ntfy;
- if (nvbo->bo.pin_count)
- NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
-
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index a7db7c31064b..e844be49e11e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -41,7 +41,7 @@ static ssize_t
nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
struct device_attribute *a, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", 100);
+ return sysfs_emit(buf, "%d\n", 100);
}
static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, 0444,
nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
@@ -54,8 +54,8 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
+ return sysfs_emit(buf, "%d\n",
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
}
static ssize_t
nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
@@ -87,8 +87,8 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
+ return sysfs_emit(buf, "%d\n",
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
}
static ssize_t
nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
index 21a5775028cc..bc9bc7208da3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.h
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -27,7 +27,7 @@
#include "nouveau_drv.h"
-struct led_classdev;
+#include <linux/leds.h>
struct nouveau_led {
struct drm_device *dev;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 5bb65258c36d..6c94451d0faa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -447,7 +447,7 @@ gf100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
spin_unlock_irqrestore(&fifo->lock, flags);
}
-void
+static void
gf100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
{
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
index b5836cbc29aa..93d628d7d508 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
@@ -30,7 +30,7 @@
#include <subdev/timer.h>
#include <subdev/top.h>
-struct nvkm_cgrp *
+static struct nvkm_cgrp *
nvkm_engn_cgrp_get(struct nvkm_engn *engn, unsigned long *pirqflags)
{
struct nvkm_cgrp *cgrp = NULL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
index 83a9c48bc58c..7ac90c495737 100644
--- a/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
@@ -45,7 +45,7 @@ wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr)
nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
}
-void
+static void
wpr_generic_header_dump(struct nvkm_subdev *subdev, const struct wpr_generic_header *hdr)
{
nvkm_debug(subdev, "wprGenericHeader\n");
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 455e1a91f0e5..76ded1568bd0 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -2,7 +2,7 @@
config DRM_OMAP
tristate "OMAP DRM"
depends on DRM && OF
- depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+ depends on ARCH_OMAP2PLUS
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select HDMI
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 8eeee71c0000..2b9d6db7860b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -318,6 +318,17 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_MAGNACHIP_D53E6EA8966
+ tristate "Magnachip D53E6EA8966 DSI panel"
+ depends on OF && SPI
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_MIPI_DBI
+ help
+ DRM panel driver for the Samsung AMS495QA01 panel controlled
+ with the Magnachip D53E6EA8966 panel IC. This panel receives
+ video data via DSI but commands via 9-bit SPI using DBI.
+
config DRM_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 RGB panel"
depends on GPIOLIB && OF && SPI
@@ -377,6 +388,16 @@ config DRM_PANEL_NOVATEK_NT35950
Sharp panels used in Sony Xperia Z5 Premium and XZ Premium
mobile phones.
+config DRM_PANEL_NOVATEK_NT36523
+ tristate "Novatek NT36523 panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Novatek NT36523 display controller, such as some
+ Boe panels used in Xiaomi Mi Pad 5 and 5 Pro tablets.
+
config DRM_PANEL_NOVATEK_NT36672A
tristate "Novatek NT36672A DSI panel"
depends on OF
@@ -685,6 +706,16 @@ config DRM_PANEL_SONY_ACX565AKM
Say Y here if you want to enable support for the Sony ACX565AKM
800x600 3.5" panel (found on the Nokia N900).
+config DRM_PANEL_SONY_TD4353_JDI
+ tristate "Sony TD4353 JDI panel"
+ depends on GPIOLIB && OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Sony Tama
+ TD4353 JDI command mode panel as found on some Sony Xperia
+ XZ2 and XZ2 Compact smartphones.
+
config DRM_PANEL_SONY_TULIP_TRULY_NT35521
tristate "Sony Tulip Truly NT35521 panel"
depends on GPIOLIB && OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index c05aa9e23907..ff169781e82d 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -29,12 +29,14 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3051D) += panel-newvision-nv3051d.o
obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3052C) += panel-newvision-nv3052c.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35560) += panel-novatek-nt35560.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36523) += panel-novatek-nt36523.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
@@ -69,6 +71,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o
obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o
obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o
obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index c924f1124ebc..783234ae0f57 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1131,6 +1131,103 @@ static const struct panel_init_cmd auo_b101uan08_3_init_cmd[] = {
{},
};
+static const struct panel_init_cmd starry_qfh032011_53g_init_cmd[] = {
+ _INIT_DCS_CMD(0xB0, 0x01),
+ _INIT_DCS_CMD(0xC3, 0x4F),
+ _INIT_DCS_CMD(0xC4, 0x40),
+ _INIT_DCS_CMD(0xC5, 0x40),
+ _INIT_DCS_CMD(0xC6, 0x40),
+ _INIT_DCS_CMD(0xC7, 0x40),
+ _INIT_DCS_CMD(0xC8, 0x4D),
+ _INIT_DCS_CMD(0xC9, 0x52),
+ _INIT_DCS_CMD(0xCA, 0x51),
+ _INIT_DCS_CMD(0xCD, 0x5D),
+ _INIT_DCS_CMD(0xCE, 0x5B),
+ _INIT_DCS_CMD(0xCF, 0x4B),
+ _INIT_DCS_CMD(0xD0, 0x49),
+ _INIT_DCS_CMD(0xD1, 0x47),
+ _INIT_DCS_CMD(0xD2, 0x45),
+ _INIT_DCS_CMD(0xD3, 0x41),
+ _INIT_DCS_CMD(0xD7, 0x50),
+ _INIT_DCS_CMD(0xD8, 0x40),
+ _INIT_DCS_CMD(0xD9, 0x40),
+ _INIT_DCS_CMD(0xDA, 0x40),
+ _INIT_DCS_CMD(0xDB, 0x40),
+ _INIT_DCS_CMD(0xDC, 0x4E),
+ _INIT_DCS_CMD(0xDD, 0x52),
+ _INIT_DCS_CMD(0xDE, 0x51),
+ _INIT_DCS_CMD(0xE1, 0x5E),
+ _INIT_DCS_CMD(0xE2, 0x5C),
+ _INIT_DCS_CMD(0xE3, 0x4C),
+ _INIT_DCS_CMD(0xE4, 0x4A),
+ _INIT_DCS_CMD(0xE5, 0x48),
+ _INIT_DCS_CMD(0xE6, 0x46),
+ _INIT_DCS_CMD(0xE7, 0x42),
+ _INIT_DCS_CMD(0xB0, 0x03),
+ _INIT_DCS_CMD(0xBE, 0x03),
+ _INIT_DCS_CMD(0xCC, 0x44),
+ _INIT_DCS_CMD(0xC8, 0x07),
+ _INIT_DCS_CMD(0xC9, 0x05),
+ _INIT_DCS_CMD(0xCA, 0x42),
+ _INIT_DCS_CMD(0xCD, 0x3E),
+ _INIT_DCS_CMD(0xCF, 0x60),
+ _INIT_DCS_CMD(0xD2, 0x04),
+ _INIT_DCS_CMD(0xD3, 0x04),
+ _INIT_DCS_CMD(0xD4, 0x01),
+ _INIT_DCS_CMD(0xD5, 0x00),
+ _INIT_DCS_CMD(0xD6, 0x03),
+ _INIT_DCS_CMD(0xD7, 0x04),
+ _INIT_DCS_CMD(0xD9, 0x01),
+ _INIT_DCS_CMD(0xDB, 0x01),
+ _INIT_DCS_CMD(0xE4, 0xF0),
+ _INIT_DCS_CMD(0xE5, 0x0A),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xCC, 0x08),
+ _INIT_DCS_CMD(0xC2, 0x08),
+ _INIT_DCS_CMD(0xC4, 0x10),
+ _INIT_DCS_CMD(0xB0, 0x02),
+ _INIT_DCS_CMD(0xC0, 0x00),
+ _INIT_DCS_CMD(0xC1, 0x0A),
+ _INIT_DCS_CMD(0xC2, 0x20),
+ _INIT_DCS_CMD(0xC3, 0x24),
+ _INIT_DCS_CMD(0xC4, 0x23),
+ _INIT_DCS_CMD(0xC5, 0x29),
+ _INIT_DCS_CMD(0xC6, 0x23),
+ _INIT_DCS_CMD(0xC7, 0x1C),
+ _INIT_DCS_CMD(0xC8, 0x19),
+ _INIT_DCS_CMD(0xC9, 0x17),
+ _INIT_DCS_CMD(0xCA, 0x17),
+ _INIT_DCS_CMD(0xCB, 0x18),
+ _INIT_DCS_CMD(0xCC, 0x1A),
+ _INIT_DCS_CMD(0xCD, 0x1E),
+ _INIT_DCS_CMD(0xCE, 0x20),
+ _INIT_DCS_CMD(0xCF, 0x23),
+ _INIT_DCS_CMD(0xD0, 0x07),
+ _INIT_DCS_CMD(0xD1, 0x00),
+ _INIT_DCS_CMD(0xD2, 0x00),
+ _INIT_DCS_CMD(0xD3, 0x0A),
+ _INIT_DCS_CMD(0xD4, 0x13),
+ _INIT_DCS_CMD(0xD5, 0x1C),
+ _INIT_DCS_CMD(0xD6, 0x1A),
+ _INIT_DCS_CMD(0xD7, 0x13),
+ _INIT_DCS_CMD(0xD8, 0x17),
+ _INIT_DCS_CMD(0xD9, 0x1C),
+ _INIT_DCS_CMD(0xDA, 0x19),
+ _INIT_DCS_CMD(0xDB, 0x17),
+ _INIT_DCS_CMD(0xDC, 0x17),
+ _INIT_DCS_CMD(0xDD, 0x18),
+ _INIT_DCS_CMD(0xDE, 0x1A),
+ _INIT_DCS_CMD(0xDF, 0x1E),
+ _INIT_DCS_CMD(0xE0, 0x20),
+ _INIT_DCS_CMD(0xE1, 0x23),
+ _INIT_DCS_CMD(0xE2, 0x07),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(120),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(80),
+ {},
+};
+
static inline struct boe_panel *to_boe_panel(struct drm_panel *panel)
{
return container_of(panel, struct boe_panel, base);
@@ -1497,6 +1594,32 @@ static const struct panel_desc boe_tv105wum_nw0_desc = {
.init_cmds = boe_init_cmd,
};
+static const struct drm_display_mode starry_qfh032011_53g_default_mode = {
+ .clock = 165731,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 100,
+ .hsync_end = 1200 + 100 + 10,
+ .htotal = 1200 + 100 + 10 + 100,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 14,
+ .vsync_end = 1920 + 14 + 10,
+ .vtotal = 1920 + 14 + 10 + 15,
+};
+
+static const struct panel_desc starry_qfh032011_53g_desc = {
+ .modes = &starry_qfh032011_53g_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = starry_qfh032011_53g_init_cmd,
+};
+
static int boe_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1667,6 +1790,9 @@ static const struct of_device_id boe_of_match[] = {
{ .compatible = "innolux,hj110iz-01a",
.data = &inx_hj110iz_desc
},
+ { .compatible = "starry,2081101qfh032011-53g",
+ .data = &starry_qfh032011_53g_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_of_match);
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 48c1702a863b..323c33c9c37a 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -167,6 +167,202 @@ static const struct drm_panel_funcs jadard_funcs = {
.get_modes = jadard_get_modes,
};
+static const struct jadard_init_cmd radxa_display_8hd_ad002_init_cmds[] = {
+ { .data = { 0xE0, 0x00 } },
+ { .data = { 0xE1, 0x93 } },
+ { .data = { 0xE2, 0x65 } },
+ { .data = { 0xE3, 0xF8 } },
+ { .data = { 0x80, 0x03 } },
+ { .data = { 0xE0, 0x01 } },
+ { .data = { 0x00, 0x00 } },
+ { .data = { 0x01, 0x7E } },
+ { .data = { 0x03, 0x00 } },
+ { .data = { 0x04, 0x65 } },
+ { .data = { 0x0C, 0x74 } },
+ { .data = { 0x17, 0x00 } },
+ { .data = { 0x18, 0xB7 } },
+ { .data = { 0x19, 0x00 } },
+ { .data = { 0x1A, 0x00 } },
+ { .data = { 0x1B, 0xB7 } },
+ { .data = { 0x1C, 0x00 } },
+ { .data = { 0x24, 0xFE } },
+ { .data = { 0x37, 0x19 } },
+ { .data = { 0x38, 0x05 } },
+ { .data = { 0x39, 0x00 } },
+ { .data = { 0x3A, 0x01 } },
+ { .data = { 0x3B, 0x01 } },
+ { .data = { 0x3C, 0x70 } },
+ { .data = { 0x3D, 0xFF } },
+ { .data = { 0x3E, 0xFF } },
+ { .data = { 0x3F, 0xFF } },
+ { .data = { 0x40, 0x06 } },
+ { .data = { 0x41, 0xA0 } },
+ { .data = { 0x43, 0x1E } },
+ { .data = { 0x44, 0x0F } },
+ { .data = { 0x45, 0x28 } },
+ { .data = { 0x4B, 0x04 } },
+ { .data = { 0x55, 0x02 } },
+ { .data = { 0x56, 0x01 } },
+ { .data = { 0x57, 0xA9 } },
+ { .data = { 0x58, 0x0A } },
+ { .data = { 0x59, 0x0A } },
+ { .data = { 0x5A, 0x37 } },
+ { .data = { 0x5B, 0x19 } },
+ { .data = { 0x5D, 0x78 } },
+ { .data = { 0x5E, 0x63 } },
+ { .data = { 0x5F, 0x54 } },
+ { .data = { 0x60, 0x49 } },
+ { .data = { 0x61, 0x45 } },
+ { .data = { 0x62, 0x38 } },
+ { .data = { 0x63, 0x3D } },
+ { .data = { 0x64, 0x28 } },
+ { .data = { 0x65, 0x43 } },
+ { .data = { 0x66, 0x41 } },
+ { .data = { 0x67, 0x43 } },
+ { .data = { 0x68, 0x62 } },
+ { .data = { 0x69, 0x50 } },
+ { .data = { 0x6A, 0x57 } },
+ { .data = { 0x6B, 0x49 } },
+ { .data = { 0x6C, 0x44 } },
+ { .data = { 0x6D, 0x37 } },
+ { .data = { 0x6E, 0x23 } },
+ { .data = { 0x6F, 0x10 } },
+ { .data = { 0x70, 0x78 } },
+ { .data = { 0x71, 0x63 } },
+ { .data = { 0x72, 0x54 } },
+ { .data = { 0x73, 0x49 } },
+ { .data = { 0x74, 0x45 } },
+ { .data = { 0x75, 0x38 } },
+ { .data = { 0x76, 0x3D } },
+ { .data = { 0x77, 0x28 } },
+ { .data = { 0x78, 0x43 } },
+ { .data = { 0x79, 0x41 } },
+ { .data = { 0x7A, 0x43 } },
+ { .data = { 0x7B, 0x62 } },
+ { .data = { 0x7C, 0x50 } },
+ { .data = { 0x7D, 0x57 } },
+ { .data = { 0x7E, 0x49 } },
+ { .data = { 0x7F, 0x44 } },
+ { .data = { 0x80, 0x37 } },
+ { .data = { 0x81, 0x23 } },
+ { .data = { 0x82, 0x10 } },
+ { .data = { 0xE0, 0x02 } },
+ { .data = { 0x00, 0x47 } },
+ { .data = { 0x01, 0x47 } },
+ { .data = { 0x02, 0x45 } },
+ { .data = { 0x03, 0x45 } },
+ { .data = { 0x04, 0x4B } },
+ { .data = { 0x05, 0x4B } },
+ { .data = { 0x06, 0x49 } },
+ { .data = { 0x07, 0x49 } },
+ { .data = { 0x08, 0x41 } },
+ { .data = { 0x09, 0x1F } },
+ { .data = { 0x0A, 0x1F } },
+ { .data = { 0x0B, 0x1F } },
+ { .data = { 0x0C, 0x1F } },
+ { .data = { 0x0D, 0x1F } },
+ { .data = { 0x0E, 0x1F } },
+ { .data = { 0x0F, 0x5F } },
+ { .data = { 0x10, 0x5F } },
+ { .data = { 0x11, 0x57 } },
+ { .data = { 0x12, 0x77 } },
+ { .data = { 0x13, 0x35 } },
+ { .data = { 0x14, 0x1F } },
+ { .data = { 0x15, 0x1F } },
+ { .data = { 0x16, 0x46 } },
+ { .data = { 0x17, 0x46 } },
+ { .data = { 0x18, 0x44 } },
+ { .data = { 0x19, 0x44 } },
+ { .data = { 0x1A, 0x4A } },
+ { .data = { 0x1B, 0x4A } },
+ { .data = { 0x1C, 0x48 } },
+ { .data = { 0x1D, 0x48 } },
+ { .data = { 0x1E, 0x40 } },
+ { .data = { 0x1F, 0x1F } },
+ { .data = { 0x20, 0x1F } },
+ { .data = { 0x21, 0x1F } },
+ { .data = { 0x22, 0x1F } },
+ { .data = { 0x23, 0x1F } },
+ { .data = { 0x24, 0x1F } },
+ { .data = { 0x25, 0x5F } },
+ { .data = { 0x26, 0x5F } },
+ { .data = { 0x27, 0x57 } },
+ { .data = { 0x28, 0x77 } },
+ { .data = { 0x29, 0x35 } },
+ { .data = { 0x2A, 0x1F } },
+ { .data = { 0x2B, 0x1F } },
+ { .data = { 0x58, 0x40 } },
+ { .data = { 0x59, 0x00 } },
+ { .data = { 0x5A, 0x00 } },
+ { .data = { 0x5B, 0x10 } },
+ { .data = { 0x5C, 0x06 } },
+ { .data = { 0x5D, 0x40 } },
+ { .data = { 0x5E, 0x01 } },
+ { .data = { 0x5F, 0x02 } },
+ { .data = { 0x60, 0x30 } },
+ { .data = { 0x61, 0x01 } },
+ { .data = { 0x62, 0x02 } },
+ { .data = { 0x63, 0x03 } },
+ { .data = { 0x64, 0x6B } },
+ { .data = { 0x65, 0x05 } },
+ { .data = { 0x66, 0x0C } },
+ { .data = { 0x67, 0x73 } },
+ { .data = { 0x68, 0x09 } },
+ { .data = { 0x69, 0x03 } },
+ { .data = { 0x6A, 0x56 } },
+ { .data = { 0x6B, 0x08 } },
+ { .data = { 0x6C, 0x00 } },
+ { .data = { 0x6D, 0x04 } },
+ { .data = { 0x6E, 0x04 } },
+ { .data = { 0x6F, 0x88 } },
+ { .data = { 0x70, 0x00 } },
+ { .data = { 0x71, 0x00 } },
+ { .data = { 0x72, 0x06 } },
+ { .data = { 0x73, 0x7B } },
+ { .data = { 0x74, 0x00 } },
+ { .data = { 0x75, 0xF8 } },
+ { .data = { 0x76, 0x00 } },
+ { .data = { 0x77, 0xD5 } },
+ { .data = { 0x78, 0x2E } },
+ { .data = { 0x79, 0x12 } },
+ { .data = { 0x7A, 0x03 } },
+ { .data = { 0x7B, 0x00 } },
+ { .data = { 0x7C, 0x00 } },
+ { .data = { 0x7D, 0x03 } },
+ { .data = { 0x7E, 0x7B } },
+ { .data = { 0xE0, 0x04 } },
+ { .data = { 0x00, 0x0E } },
+ { .data = { 0x02, 0xB3 } },
+ { .data = { 0x09, 0x60 } },
+ { .data = { 0x0E, 0x2A } },
+ { .data = { 0x36, 0x59 } },
+ { .data = { 0xE0, 0x00 } },
+};
+
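+/*
+ * 800x1280 mode for the Radxa 8HD panel. Timings are written as
+ * active + front porch (+ sync (+ back porch)) sums so the individual
+ * porch and sync widths stay readable at a glance.
+ */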
+static const struct jadard_panel_desc radxa_display_8hd_ad002_desc = {
+ .mode = {
+ .clock = 70000,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 18,
+ .htotal = 800 + 40 + 18 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 4,
+ .vtotal = 1280 + 20 + 4 + 20,
+
+ .width_mm = 127,
+ .height_mm = 199,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_cmds = radxa_display_8hd_ad002_init_cmds,
+ .num_init_cmds = ARRAY_SIZE(radxa_display_8hd_ad002_init_cmds),
+};
+
static const struct jadard_init_cmd cz101b4001_init_cmds[] = {
{ .data = { 0xE0, 0x00 } },
{ .data = { 0xE1, 0x93 } },
@@ -452,7 +648,18 @@ static void jadard_dsi_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id jadard_of_match[] = {
- { .compatible = "chongzhou,cz101b4001", .data = &cz101b4001_desc },
+ {
+ .compatible = "chongzhou,cz101b4001",
+ .data = &cz101b4001_desc
+ },
+ {
+ .compatible = "radxa,display-10hd-ad001",
+ .data = &cz101b4001_desc
+ },
+ {
+ .compatible = "radxa,display-8hd-ad002",
+ .data = &radxa_display_8hd_ad002_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jadard_of_match);
diff --git a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
new file mode 100644
index 000000000000..8c362c40227f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Magnachip d53e6ea8966 MIPI-DSI panel driver
+ * Copyright (C) 2023 Chris Morgan
+ */
+
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+/* Forward declaration for use in backlight function */
+struct d53e6ea8966;
+
+/* Panel info, unique to each panel */
+struct d53e6ea8966_panel_info {
+ /** @display_modes: the supported display modes */
+ const struct drm_display_mode *display_modes;
+ /** @num_modes: the number of supported display modes */
+ unsigned int num_modes;
+ /** @width_mm: panel width in mm */
+ u16 width_mm;
+ /** @height_mm: panel height in mm */
+ u16 height_mm;
+ /** @bus_flags: drm bus flags for panel */
+ u32 bus_flags;
+ /** @panel_init_seq: panel specific init sequence */
+ void (*panel_init_seq)(struct d53e6ea8966 *db);
+ /** @backlight_register: panel backlight registration or NULL */
+ int (*backlight_register)(struct d53e6ea8966 *db);
+};
+
+struct d53e6ea8966 {
+ /** @dev: the container device */
+ struct device *dev;
+ /** @dbi: the DBI bus abstraction handle */
+ struct mipi_dbi dbi;
+ /** @panel: the DRM panel instance for this device */
+ struct drm_panel panel;
+ /** @reset: reset GPIO line */
+ struct gpio_desc *reset;
+ /** @enable: enable GPIO line */
+ struct gpio_desc *enable;
+ /** @reg_vdd: VDD supply regulator for panel logic */
+ struct regulator *reg_vdd;
+ /** @reg_elvdd: ELVDD supply regulator for panel display */
+ struct regulator *reg_elvdd;
+ /** @dsi_dev: DSI child device (panel) */
+ struct mipi_dsi_device *dsi_dev;
+ /** @bl_dev: pseudo-backlight device for OLED panel */
+ struct backlight_device *bl_dev;
+ /** @panel_info: struct containing panel timing and info */
+ const struct d53e6ea8966_panel_info *panel_info;
+};
+
+#define NUM_GAMMA_LEVELS 16
+#define GAMMA_TABLE_COUNT 23
+#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1)
+
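+/* Manufacturer command set (MCS) opcodes used by the init and gamma code below */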
+#define MCS_ELVSS_ON 0xb1
+#define MCS_TEMP_SWIRE 0xb2
+#define MCS_PASSWORD_0 0xf0
+#define MCS_PASSWORD_1 0xf1
+#define MCS_ANALOG_PWR_CTL_0 0xf4
+#define MCS_ANALOG_PWR_CTL_1 0xf5
+#define MCS_GTCON_SET 0xf7
+#define MCS_GATELESS_SIGNAL_SET 0xf8
+#define MCS_SET_GAMMA 0xf9
+
+static inline struct d53e6ea8966 *to_d53e6ea8966(struct drm_panel *panel)
+{
+ return container_of(panel, struct d53e6ea8966, panel);
+}
+
+/* Table of gamma values provided in the datasheet */
+static u8 ams495qa01_gamma[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = {
+ {0x01, 0x79, 0x78, 0x8d, 0xd9, 0xdf, 0xd5, 0xcb, 0xcf, 0xc5,
+ 0xe5, 0xe0, 0xe4, 0xdc, 0xb8, 0xd4, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x7d, 0x7c, 0x92, 0xd7, 0xdd, 0xd2, 0xcb, 0xd0, 0xc6,
+ 0xe5, 0xe1, 0xe3, 0xda, 0xbd, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x7f, 0x7e, 0x95, 0xd7, 0xde, 0xd2, 0xcb, 0xcf, 0xc5,
+ 0xe5, 0xe3, 0xe3, 0xda, 0xbf, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x82, 0x81, 0x99, 0xd6, 0xdd, 0xd1, 0xca, 0xcf, 0xc3,
+ 0xe4, 0xe3, 0xe3, 0xda, 0xc2, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x84, 0x83, 0x9b, 0xd7, 0xde, 0xd2, 0xc8, 0xce, 0xc2,
+ 0xe4, 0xe3, 0xe2, 0xd9, 0xc3, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x87, 0x86, 0x9f, 0xd6, 0xdd, 0xd1, 0xc7, 0xce, 0xc1,
+ 0xe4, 0xe3, 0xe2, 0xd9, 0xc6, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x89, 0x89, 0xa2, 0xd5, 0xdb, 0xcf, 0xc8, 0xcf, 0xc2,
+ 0xe3, 0xe3, 0xe1, 0xd9, 0xc7, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x8b, 0x8b, 0xa5, 0xd5, 0xdb, 0xcf, 0xc7, 0xce, 0xc0,
+ 0xe3, 0xe3, 0xe1, 0xd8, 0xc7, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x8d, 0x8d, 0xa7, 0xd5, 0xdb, 0xcf, 0xc6, 0xce, 0xc0,
+ 0xe4, 0xe4, 0xe1, 0xd7, 0xc8, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x8f, 0x8f, 0xaa, 0xd4, 0xdb, 0xce, 0xc6, 0xcd, 0xbf,
+ 0xe3, 0xe3, 0xe1, 0xd7, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x91, 0x91, 0xac, 0xd3, 0xda, 0xce, 0xc5, 0xcd, 0xbe,
+ 0xe3, 0xe3, 0xe0, 0xd7, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x93, 0x93, 0xaf, 0xd3, 0xda, 0xcd, 0xc5, 0xcd, 0xbe,
+ 0xe2, 0xe3, 0xdf, 0xd6, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x95, 0x95, 0xb1, 0xd2, 0xd9, 0xcc, 0xc4, 0xcd, 0xbe,
+ 0xe2, 0xe3, 0xdf, 0xd7, 0xcc, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x99, 0x99, 0xb6, 0xd1, 0xd9, 0xcc, 0xc3, 0xcb, 0xbc,
+ 0xe2, 0xe4, 0xdf, 0xd6, 0xcc, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x9c, 0x9c, 0xba, 0xd0, 0xd8, 0xcb, 0xc3, 0xcb, 0xbb,
+ 0xe2, 0xe4, 0xdf, 0xd6, 0xce, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x9f, 0x9f, 0xbe, 0xcf, 0xd7, 0xc9, 0xc2, 0xcb, 0xbb,
+ 0xe1, 0xe3, 0xde, 0xd6, 0xd0, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+};
+
+/*
+ * Table of ELVSS values provided in the datasheet; each entry
+ * corresponds to one gamma level above.
+ */
+static u8 ams495qa01_elvss[NUM_GAMMA_LEVELS] = {
+ 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
+ 0x15, 0x15, 0x14, 0x14, 0x13, 0x12,
+};
+
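+/*
+ * Brightness is used directly as an index: each level selects one row
+ * of the gamma table plus the matching ELVSS value, so gamma and ELVSS
+ * are always reprogrammed together.
+ */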
+static int ams495qa01_update_gamma(struct mipi_dbi *dbi, int brightness)
+{
+ mipi_dbi_command_buf(dbi, MCS_SET_GAMMA, ams495qa01_gamma[brightness],
+ ARRAY_SIZE(ams495qa01_gamma[brightness]));
+ mipi_dbi_command(dbi, MCS_SET_GAMMA, 0x00);
+
+ /* Undocumented command */
+ mipi_dbi_command(dbi, 0x26, 0x00);
+
+ mipi_dbi_command(dbi, MCS_TEMP_SWIRE, ams495qa01_elvss[brightness]);
+
+ return 0;
+}
+
+static void ams495qa01_panel_init(struct d53e6ea8966 *db)
+{
+ struct mipi_dbi *dbi = &db->dbi;
+
+ mipi_dbi_command(dbi, MCS_PASSWORD_0, 0x5a, 0x5a);
+ mipi_dbi_command(dbi, MCS_PASSWORD_1, 0x5a, 0x5a);
+
+ /* Undocumented commands */
+ mipi_dbi_command(dbi, 0xb0, 0x02);
+ mipi_dbi_command(dbi, 0xf3, 0x3b);
+
+ mipi_dbi_command(dbi, MCS_ANALOG_PWR_CTL_0, 0x33, 0x42, 0x00, 0x08);
+ mipi_dbi_command(dbi, MCS_ANALOG_PWR_CTL_1, 0x00, 0x06, 0x26, 0x35, 0x03);
+
+ /* Undocumented commands */
+ mipi_dbi_command(dbi, 0xf6, 0x02);
+ mipi_dbi_command(dbi, 0xc6, 0x0b, 0x00, 0x00, 0x3c, 0x00, 0x22,
+ 0x00, 0x00, 0x00, 0x00);
+
+ mipi_dbi_command(dbi, MCS_GTCON_SET, 0x20);
+ mipi_dbi_command(dbi, MCS_TEMP_SWIRE, 0x06, 0x06, 0x06, 0x06);
+ mipi_dbi_command(dbi, MCS_ELVSS_ON, 0x07, 0x00, 0x10);
+ mipi_dbi_command(dbi, MCS_GATELESS_SIGNAL_SET, 0x7f, 0x7a,
+ 0x89, 0x67, 0x26, 0x38, 0x00, 0x00, 0x09,
+ 0x67, 0x70, 0x88, 0x7a, 0x76, 0x05, 0x09,
+ 0x23, 0x23, 0x23);
+
+ /* Undocumented commands */
+ mipi_dbi_command(dbi, 0xb5, 0xff, 0xef, 0x35, 0x42, 0x0d, 0xd7,
+ 0xff, 0x07, 0xff, 0xff, 0xfd, 0x00, 0x01,
+ 0xff, 0x05, 0x12, 0x0f, 0xff, 0xff, 0xff,
+ 0xff);
+ mipi_dbi_command(dbi, 0xb4, 0x15);
+ mipi_dbi_command(dbi, 0xb3, 0x00);
+
+ ams495qa01_update_gamma(dbi, MAX_BRIGHTNESS);
+}
+
+static int d53e6ea8966_prepare(struct drm_panel *panel)
+{
+ struct d53e6ea8966 *db = to_d53e6ea8966(panel);
+ int ret;
+
+ /* Power up */
+ ret = regulator_enable(db->reg_vdd);
+ if (ret) {
+ dev_err(db->dev, "failed to enable vdd regulator: %d\n", ret);
+ return ret;
+ }
+
+ if (db->reg_elvdd) {
+ ret = regulator_enable(db->reg_elvdd);
+ if (ret) {
+ dev_err(db->dev,
+ "failed to enable elvdd regulator: %d\n", ret);
+ regulator_disable(db->reg_vdd);
+ return ret;
+ }
+ }
+
+ /* Enable */
+ if (db->enable)
+ gpiod_set_value_cansleep(db->enable, 1);
+
+ msleep(50);
+
+ /* Reset */
+ gpiod_set_value_cansleep(db->reset, 1);
+ usleep_range(1000, 5000);
+ gpiod_set_value_cansleep(db->reset, 0);
+ msleep(20);
+
+ db->panel_info->panel_init_seq(db);
+
+ return 0;
+}
+
+static int d53e6ea8966_enable(struct drm_panel *panel)
+{
+ struct d53e6ea8966 *db = to_d53e6ea8966(panel);
+ struct mipi_dbi *dbi = &db->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(200);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+ usleep_range(10000, 15000);
+
+ return 0;
+}
+
+static int d53e6ea8966_disable(struct drm_panel *panel)
+{
+ struct d53e6ea8966 *db = to_d53e6ea8966(panel);
+ struct mipi_dbi *dbi = &db->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
+ msleep(20);
+ mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
+ msleep(100);
+
+ return 0;
+}
+
+static int d53e6ea8966_unprepare(struct drm_panel *panel)
+{
+ struct d53e6ea8966 *db = to_d53e6ea8966(panel);
+
+ if (db->enable)
+ gpiod_set_value_cansleep(db->enable, 0);
+
+ gpiod_set_value_cansleep(db->reset, 1);
+
+ if (db->reg_elvdd)
+ regulator_disable(db->reg_elvdd);
+
+ regulator_disable(db->reg_vdd);
+ msleep(100);
+
+ return 0;
+}
+
+static int d53e6ea8966_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct d53e6ea8966 *db = to_d53e6ea8966(panel);
+ const struct d53e6ea8966_panel_info *panel_info = db->panel_info;
+ struct drm_display_mode *mode;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ unsigned int i;
+
+ for (i = 0; i < panel_info->num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel_info->display_modes[i]);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = panel_info->width_mm;
+ connector->display_info.height_mm = panel_info->height_mm;
+ connector->display_info.bus_flags = panel_info->bus_flags;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs d53e6ea8966_panel_funcs = {
+ .disable = d53e6ea8966_disable,
+ .enable = d53e6ea8966_enable,
+ .get_modes = d53e6ea8966_get_modes,
+ .prepare = d53e6ea8966_prepare,
+ .unprepare = d53e6ea8966_unprepare,
+};
+
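+/*
+ * The panel is an OLED, so there is no separate backlight: "brightness"
+ * is implemented by reprogramming the gamma/ELVSS tables.
+ */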
+static int ams495qa01_set_brightness(struct backlight_device *bd)
+{
+ struct d53e6ea8966 *db = bl_get_data(bd);
+ struct mipi_dbi *dbi = &db->dbi;
+ int brightness = backlight_get_brightness(bd);
+
+ ams495qa01_update_gamma(dbi, brightness);
+
+ return 0;
+}
+
+static const struct backlight_ops ams495qa01_backlight_ops = {
+ .update_status = ams495qa01_set_brightness,
+};
+
+static int ams495qa01_backlight_register(struct d53e6ea8966 *db)
+{
+ struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = MAX_BRIGHTNESS,
+ .max_brightness = MAX_BRIGHTNESS,
+ };
+ struct device *dev = db->dev;
+ int ret = 0;
+
+ db->bl_dev = devm_backlight_device_register(dev, "panel", dev, db,
+ &ams495qa01_backlight_ops,
+ &props);
+ if (IS_ERR(db->bl_dev)) {
+ ret = PTR_ERR(db->bl_dev);
+ dev_err(dev, "error registering backlight device (%d)\n", ret);
+ }
+
+ return ret;
+}
+
+static int d53e6ea8966_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dsi_host *dsi_host;
+ struct d53e6ea8966 *db;
+ int ret;
+ struct mipi_dsi_device_info info = {
+ .type = "d53e6ea8966",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, db);
+
+ db->dev = dev;
+
+ db->panel_info = of_device_get_match_data(dev);
+ if (!db->panel_info)
+ return -EINVAL;
+
+ db->reg_vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(db->reg_vdd))
+ return dev_err_probe(dev, PTR_ERR(db->reg_vdd),
+ "Failed to get vdd supply\n");
+
+ db->reg_elvdd = devm_regulator_get_optional(dev, "elvdd");
+ if (IS_ERR(db->reg_elvdd))
+ db->reg_elvdd = NULL;
+
+ db->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(db->reset)) {
+ ret = PTR_ERR(db->reset);
+ return dev_err_probe(dev, ret, "no RESET GPIO\n");
+ }
+
+ db->enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(db->enable)) {
+ ret = PTR_ERR(db->enable);
+ return dev_err_probe(dev, ret, "cannot get ENABLE GPIO\n");
+ }
+
+ ret = mipi_dbi_spi_init(spi, &db->dbi, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
+
+ dsi_host = drm_of_get_dsi_bus(dev);
+ if (IS_ERR(dsi_host)) {
+ ret = PTR_ERR(dsi_host);
+ return dev_err_probe(dev, ret, "Error attaching DSI bus\n");
+ }
+
+ db->dsi_dev = devm_mipi_dsi_device_register_full(dev, dsi_host, &info);
+ if (IS_ERR(db->dsi_dev)) {
+ dev_err(dev, "failed to register dsi device: %ld\n",
+ PTR_ERR(db->dsi_dev));
+ return PTR_ERR(db->dsi_dev);
+ }
+
+ db->dsi_dev->lanes = 2;
+ db->dsi_dev->format = MIPI_DSI_FMT_RGB888;
+ db->dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
+
+ drm_panel_init(&db->panel, dev, &d53e6ea8966_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (db->panel_info->backlight_register) {
+ ret = db->panel_info->backlight_register(db);
+ if (ret < 0)
+ return ret;
+ db->panel.backlight = db->bl_dev;
+ }
+
+ drm_panel_add(&db->panel);
+
+ ret = devm_mipi_dsi_attach(dev, db->dsi_dev);
+ if (ret < 0) {
+ dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&db->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void d53e6ea8966_remove(struct spi_device *spi)
+{
+ struct d53e6ea8966 *db = spi_get_drvdata(spi);
+
+ drm_panel_remove(&db->panel);
+}
+
+static const struct drm_display_mode ams495qa01_modes[] = {
+ { /* 60hz */
+ .clock = 33500,
+ .hdisplay = 960,
+ .hsync_start = 960 + 10,
+ .hsync_end = 960 + 10 + 2,
+ .htotal = 960 + 10 + 2 + 10,
+ .vdisplay = 544,
+ .vsync_start = 544 + 10,
+ .vsync_end = 544 + 10 + 2,
+ .vtotal = 544 + 10 + 2 + 10,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ },
+ { /* 50hz */
+ .clock = 27800,
+ .hdisplay = 960,
+ .hsync_start = 960 + 10,
+ .hsync_end = 960 + 10 + 2,
+ .htotal = 960 + 10 + 2 + 10,
+ .vdisplay = 544,
+ .vsync_start = 544 + 10,
+ .vsync_end = 544 + 10 + 2,
+ .vtotal = 544 + 10 + 2 + 10,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+};
+
+static const struct d53e6ea8966_panel_info ams495qa01_info = {
+ .display_modes = ams495qa01_modes,
+ .num_modes = ARRAY_SIZE(ams495qa01_modes),
+ .width_mm = 117,
+ .height_mm = 74,
+ .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .panel_init_seq = ams495qa01_panel_init,
+ .backlight_register = ams495qa01_backlight_register,
+};
+
+static const struct of_device_id d53e6ea8966_match[] = {
+ { .compatible = "samsung,ams495qa01", .data = &ams495qa01_info },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, d53e6ea8966_match);
+
+static const struct spi_device_id d53e6ea8966_ids[] = {
+ { "ams495qa01", 0 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(spi, d53e6ea8966_ids);
+
+static struct spi_driver d53e6ea8966_driver = {
+ .driver = {
+ .name = "d53e6ea8966-panel",
+ .of_match_table = d53e6ea8966_match,
+ },
+ .id_table = d53e6ea8966_ids,
+ .probe = d53e6ea8966_probe,
+ .remove = d53e6ea8966_remove,
+};
+module_spi_driver(d53e6ea8966_driver);
+
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_DESCRIPTION("Magnachip d53e6ea8966 panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
new file mode 100644
index 000000000000..d30dbbfb67b1
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for panels based on the Novatek NT36523 display driver IC
+ *
+ * Copyright (c) 2022, 2023 Jianhua Lu <lujianhua000@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define DSI_NUM_MIN 1
+
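+/*
+ * Mirror one DCS write to both links of a dual-DSI panel. The
+ * do/while (0) wrapper makes the macro expand to a single statement,
+ * so it stays safe in unbraced if/else bodies.
+ */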
+#define mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, cmd, seq...) \
+ do { \
+ mipi_dsi_dcs_write_seq(dsi0, cmd, seq); \
+ mipi_dsi_dcs_write_seq(dsi1, cmd, seq); \
+ } while (0)
+
+struct panel_info {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi[2];
+ const struct panel_desc *desc;
+
+ struct gpio_desc *reset_gpio;
+ struct backlight_device *backlight;
+ struct regulator *vddio;
+
+ bool prepared;
+};
+
+struct panel_desc {
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ unsigned int bpc;
+ unsigned int lanes;
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+
+ const struct drm_display_mode *modes;
+ unsigned int num_modes;
+ const struct mipi_dsi_device_info dsi_info;
+ int (*init_sequence)(struct panel_info *pinfo);
+
+ bool is_dual_dsi;
+};
+
+static inline struct panel_info *to_panel_info(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_info, panel);
+}
+
+static int elish_boe_init_sequence(struct panel_info *pinfo)
+{
+ struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
+ struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ /* No datasheet, so write magic init sequence directly */
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x47);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x47);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x47);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd2, 0x30);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x49);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x76, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x77, 0x49);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x49);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x49);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x49);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x04);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x49);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x04);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x59);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x48);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x43);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x3c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x43);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x3c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x43);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x3c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd7, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdc, 0x43);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdd, 0x3c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe1, 0x43);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe2, 0x3c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf2, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf3, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf4, 0x48);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x13, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x23);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x23);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x97, 0x3c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x98, 0x02);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x99, 0x95);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9b, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9c, 0x0b);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9d, 0x0a);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9e, 0x90);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa3, 0x50);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x60);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0xc0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
+ msleep(70);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
+
+ return 0;
+}
+
+static int elish_csot_init_sequence(struct panel_info *pinfo)
+{
+ struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
+ struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ /* No datasheet, so write magic init sequence directly */
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x30);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0x40);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x55, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x46);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x46);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x46);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x4d);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x4b);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x96);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x4b);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x07);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x4b);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x07);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x5c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x3f);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x08);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x40);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x08);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x40);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x08);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x40);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x1c);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
+ msleep(70);
+ mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
+
+ return 0;
+}
+
+static const struct drm_display_mode elish_boe_modes[] = {
+ {
+ /* There is only one 120 Hz timing, but it doesn't work reliably, so 104 Hz is preferred */
+ .clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 104 / 1000,
+ .hdisplay = 1600,
+ .hsync_start = 1600 + 60,
+ .hsync_end = 1600 + 60 + 8,
+ .htotal = 1600 + 60 + 8 + 60,
+ .vdisplay = 2560,
+ .vsync_start = 2560 + 26,
+ .vsync_end = 2560 + 26 + 4,
+ .vtotal = 2560 + 26 + 4 + 168,
+ },
+};
+
+static const struct drm_display_mode elish_csot_modes[] = {
+ {
+ /* There is only one 120 Hz timing, but it doesn't work reliably, so 104 Hz is preferred */
+ .clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 104 / 1000,
+ .hdisplay = 1600,
+ .hsync_start = 1600 + 200,
+ .hsync_end = 1600 + 200 + 40,
+ .htotal = 1600 + 200 + 40 + 52,
+ .vdisplay = 2560,
+ .vsync_start = 2560 + 26,
+ .vsync_end = 2560 + 26 + 4,
+ .vtotal = 2560 + 26 + 4 + 168,
+ },
+};
+
+static const struct panel_desc elish_boe_desc = {
+ .modes = elish_boe_modes,
+ .num_modes = ARRAY_SIZE(elish_boe_modes),
+ .dsi_info = {
+ .type = "BOE-elish",
+ .channel = 0,
+ .node = NULL,
+ },
+ .width_mm = 127,
+ .height_mm = 203,
+ .bpc = 8,
+ .lanes = 3,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+ .init_sequence = elish_boe_init_sequence,
+ .is_dual_dsi = true,
+};
+
+static const struct panel_desc elish_csot_desc = {
+ .modes = elish_csot_modes,
+ .num_modes = ARRAY_SIZE(elish_csot_modes),
+ .dsi_info = {
+ .type = "CSOT-elish",
+ .channel = 0,
+ .node = NULL,
+ },
+ .width_mm = 127,
+ .height_mm = 203,
+ .bpc = 8,
+ .lanes = 3,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+ .init_sequence = elish_csot_init_sequence,
+ .is_dual_dsi = true,
+};
+
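+/* Pulse the reset line twice, with generous delays, before sending the init sequence */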
+static void nt36523_reset(struct panel_info *pinfo)
+{
+ gpiod_set_value_cansleep(pinfo->reset_gpio, 1);
+ usleep_range(12000, 13000);
+ gpiod_set_value_cansleep(pinfo->reset_gpio, 0);
+ usleep_range(12000, 13000);
+ gpiod_set_value_cansleep(pinfo->reset_gpio, 1);
+ usleep_range(12000, 13000);
+ gpiod_set_value_cansleep(pinfo->reset_gpio, 0);
+ usleep_range(12000, 13000);
+}
+
+static int nt36523_prepare(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int ret;
+
+ if (pinfo->prepared)
+ return 0;
+
+ ret = regulator_enable(pinfo->vddio);
+ if (ret) {
+ dev_err(panel->dev, "failed to enable vddio regulator: %d\n", ret);
+ return ret;
+ }
+
+ nt36523_reset(pinfo);
+
+ ret = pinfo->desc->init_sequence(pinfo);
+ if (ret < 0) {
+ regulator_disable(pinfo->vddio);
+ dev_err(panel->dev, "failed to initialize panel: %d\n", ret);
+ return ret;
+ }
+
+ pinfo->prepared = true;
+
+ return 0;
+}
+
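+/*
+ * is_dual_dsi promotes to 0 or 1, so DSI_NUM_MIN + is_dual_dsi below
+ * iterates over exactly the populated entries of pinfo->dsi[].
+ */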
+static int nt36523_disable(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int i, ret;
+
+ for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
+ ret = mipi_dsi_dcs_set_display_off(pinfo->dsi[i]);
+ if (ret < 0)
+ dev_err(&pinfo->dsi[i]->dev, "failed to set display off: %d\n", ret);
+ }
+
+ for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
+ ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->dsi[i]);
+ if (ret < 0)
+ dev_err(&pinfo->dsi[i]->dev, "failed to enter sleep mode: %d\n", ret);
+ }
+
+ msleep(70);
+
+ return 0;
+}
+
+static int nt36523_unprepare(struct drm_panel *panel)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+
+ if (!pinfo->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(pinfo->reset_gpio, 1);
+ regulator_disable(pinfo->vddio);
+
+ pinfo->prepared = false;
+
+ return 0;
+}
+
+static void nt36523_remove(struct mipi_dsi_device *dsi)
+{
+ struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(pinfo->dsi[0]);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI0 host: %d\n", ret);
+
+ if (pinfo->desc->is_dual_dsi) {
+ ret = mipi_dsi_detach(pinfo->dsi[1]);
+ if (ret < 0)
+ dev_err(&pinfo->dsi[1]->dev, "failed to detach from DSI1 host: %d\n", ret);
+ mipi_dsi_device_unregister(pinfo->dsi[1]);
+ }
+
+ drm_panel_remove(&pinfo->panel);
+}
+
+static int nt36523_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct panel_info *pinfo = to_panel_info(panel);
+ int i;
+
+ for (i = 0; i < pinfo->desc->num_modes; i++) {
+ const struct drm_display_mode *m = &pinfo->desc->modes[i];
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, drm_mode_vrefresh(m));
+ return -ENOMEM;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ if (i == 0)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.width_mm = pinfo->desc->width_mm;
+ connector->display_info.height_mm = pinfo->desc->height_mm;
+ connector->display_info.bpc = pinfo->desc->bpc;
+
+ return pinfo->desc->num_modes;
+}
+
+static const struct drm_panel_funcs nt36523_panel_funcs = {
+ .disable = nt36523_disable,
+ .prepare = nt36523_prepare,
+ .unprepare = nt36523_unprepare,
+ .get_modes = nt36523_get_modes,
+};
+
+static int nt36523_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct device_node *dsi1;
+ struct mipi_dsi_host *dsi1_host;
+ struct panel_info *pinfo;
+ const struct mipi_dsi_device_info *info;
+ int i, ret;
+
+ pinfo = devm_kzalloc(dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ pinfo->vddio = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(pinfo->vddio))
+ return dev_err_probe(dev, PTR_ERR(pinfo->vddio), "failed to get vddio regulator\n");
+
+ pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pinfo->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio), "failed to get reset gpio\n");
+
+ pinfo->desc = of_device_get_match_data(dev);
+ if (!pinfo->desc)
+ return -ENODEV;
+
+ /* If the panel is dual dsi, register DSI1 */
+ if (pinfo->desc->is_dual_dsi) {
+ info = &pinfo->desc->dsi_info;
+
+ dsi1 = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+ if (!dsi1) {
+ dev_err(dev, "cannot get secondary DSI node.\n");
+ return -ENODEV;
+ }
+
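+ /*
+ * The second DSI host may not have probed yet; defer probing
+ * rather than failing outright.
+ */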
+ dsi1_host = of_find_mipi_dsi_host_by_node(dsi1);
+ of_node_put(dsi1);
+ if (!dsi1_host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
+
+ pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
+ if (IS_ERR(pinfo->dsi[1])) {
+ dev_err(dev, "cannot get secondary DSI device\n");
+ return PTR_ERR(pinfo->dsi[1]);
+ }
+ }
+
+ pinfo->dsi[0] = dsi;
+ mipi_dsi_set_drvdata(dsi, pinfo);
+ drm_panel_init(&pinfo->panel, dev, &nt36523_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&pinfo->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get backlight\n");
+
+ drm_panel_add(&pinfo->panel);
+
+ for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
+ pinfo->dsi[i]->lanes = pinfo->desc->lanes;
+ pinfo->dsi[i]->format = pinfo->desc->format;
+ pinfo->dsi[i]->mode_flags = pinfo->desc->mode_flags;
+
+ ret = mipi_dsi_attach(pinfo->dsi[i]);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "cannot attach to DSI%d host.\n", i);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id nt36523_of_match[] = {
+ {
+ .compatible = "xiaomi,elish-boe-nt36523",
+ .data = &elish_boe_desc,
+ },
+ {
+ .compatible = "xiaomi,elish-csot-nt36523",
+ .data = &elish_csot_desc,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nt36523_of_match);
+
+static struct mipi_dsi_driver nt36523_driver = {
+ .probe = nt36523_probe,
+ .remove = nt36523_remove,
+ .driver = {
+ .name = "panel-novatek-nt36523",
+ .of_match_table = nt36523_of_match,
+ },
+};
+module_mipi_dsi_driver(nt36523_driver);
+
+MODULE_AUTHOR("Jianhua Lu <lujianhua000@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Novatek NT36523 based MIPI DSI panels");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 76160e5d43bd..c250ca36a5b3 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -7,6 +7,7 @@
*/
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -48,6 +49,7 @@ struct seiko_panel {
const struct seiko_panel_desc *desc;
struct regulator *dvdd;
struct regulator *avdd;
+ struct gpio_desc *enable_gpio;
};
static inline struct seiko_panel *to_seiko_panel(struct drm_panel *panel)
@@ -139,6 +141,8 @@ static int seiko_panel_unprepare(struct drm_panel *panel)
if (!p->prepared)
return 0;
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
+
regulator_disable(p->avdd);
/* Add a 100ms delay as per the panel datasheet */
@@ -174,6 +178,8 @@ static int seiko_panel_prepare(struct drm_panel *panel)
goto disable_dvdd;
}
+ gpiod_set_value_cansleep(p->enable_gpio, 1);
+
p->prepared = true;
return 0;
@@ -252,6 +258,12 @@ static int seiko_panel_probe(struct device *dev,
if (IS_ERR(panel->avdd))
return PTR_ERR(panel->avdd);
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(panel->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
+ "failed to request GPIO\n");
+
drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
DRM_MODE_CONNECTOR_DPI);
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 0b8cf65172ff..7eae83aa0ea1 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -135,6 +135,7 @@ struct st7701 {
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset;
unsigned int sleep_delay;
+ enum drm_panel_orientation orientation;
};
static inline struct st7701 *panel_to_st7701(struct drm_panel *panel)
@@ -397,6 +398,31 @@ static void dmt028vghmcmi_1a_gip_sequence(struct st7701 *st7701)
ST7701_DSI(st7701, 0x3A, 0x70);
}
+static void kd50t048a_gip_sequence(struct st7701 *st7701)
+{
+ /*
+ * ST7701_SPEC_V1.2 does not provide enough information about this
+ * specific command sequence, so it is taken from the vendor BSP
+ * driver.
+ */
+ ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_DSI(st7701, 0xE1, 0x08, 0x00, 0x0A, 0x00, 0x07, 0x00, 0x09,
+ 0x00, 0x00, 0x33, 0x33);
+ ST7701_DSI(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x33);
+ ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
+ ST7701_DSI(st7701, 0xE5, 0x0E, 0x60, 0xA0, 0xA0, 0x10, 0x60, 0xA0,
+ 0xA0, 0x0A, 0x60, 0xA0, 0xA0, 0x0C, 0x60, 0xA0, 0xA0);
+ ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x33);
+ ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
+ ST7701_DSI(st7701, 0xE8, 0x0D, 0x60, 0xA0, 0xA0, 0x0F, 0x60, 0xA0,
+ 0xA0, 0x09, 0x60, 0xA0, 0xA0, 0x0B, 0x60, 0xA0, 0xA0);
+ ST7701_DSI(st7701, 0xEB, 0x02, 0x01, 0xE4, 0xE4, 0x44, 0x00, 0x40);
+ ST7701_DSI(st7701, 0xEC, 0x02, 0x01);
+ ST7701_DSI(st7701, 0xED, 0xAB, 0x89, 0x76, 0x54, 0x01, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x10, 0x45, 0x67, 0x98, 0xBA);
+}
+
static int st7701_prepare(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
@@ -489,15 +515,29 @@ static int st7701_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = desc_mode->width_mm;
connector->display_info.height_mm = desc_mode->height_mm;
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
+ drm_connector_set_panel_orientation(connector, st7701->orientation);
+
return 1;
}
+static enum drm_panel_orientation st7701_get_orientation(struct drm_panel *panel)
+{
+ struct st7701 *st7701 = panel_to_st7701(panel);
+
+ return st7701->orientation;
+}
+
static const struct drm_panel_funcs st7701_funcs = {
.disable = st7701_disable,
.unprepare = st7701_unprepare,
.prepare = st7701_prepare,
.enable = st7701_enable,
.get_modes = st7701_get_modes,
+ .get_orientation = st7701_get_orientation,
};
static const struct drm_display_mode ts8550b_mode = {
@@ -700,6 +740,105 @@ static const struct st7701_panel_desc dmt028vghmcmi_1a_desc = {
.gip_sequence = dmt028vghmcmi_1a_gip_sequence,
};
+static const struct drm_display_mode kd50t048a_mode = {
+ .clock = 27500,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 10,
+ .htotal = 480 + 2 + 10 + 2,
+
+ .vdisplay = 854,
+ .vsync_start = 854 + 2,
+ .vsync_end = 854 + 2 + 2,
+ .vtotal = 854 + 2 + 2 + 17,
+
+ .width_mm = 69,
+ .height_mm = 139,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct st7701_panel_desc kd50t048a_desc = {
+ .mode = &kd50t048a_mode,
+ .lanes = 2,
+ .format = MIPI_DSI_FMT_RGB888,
+ .panel_sleep_delay = 0,
+
+ .pv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x2),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1e),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x23),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xc),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xc),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x5),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x3),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x11),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 2) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x24),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x18)
+ },
+ .nlinv = 1,
+ .vop_uv = 4887500,
+ .vcom_uv = 937500,
+ .vgh_mv = 15000,
+ .vgl_mv = -9510,
+ .avdd_mv = 6600,
+ .avcl_mv = -4400,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIN,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = kd50t048a_gip_sequence,
+};
+
static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
{
const struct st7701_panel_desc *desc;
@@ -730,6 +869,10 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(st7701->reset);
}
+ ret = of_drm_get_panel_orientation(dsi->dev.of_node, &st7701->orientation);
+ if (ret < 0)
+ return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n");
+
drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -775,6 +918,7 @@ static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id st7701_of_match[] = {
{ .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
+ { .compatible = "elida,kd50t048a", .data = &kd50t048a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
{ }
};
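The orientation support added above follows the standard two-step pattern: the
panel parses its orientation from the device tree at probe time, reports it
through the new .get_orientation hook, and (per the TODO) keeps calling
drm_connector_set_panel_orientation() from .get_modes until all KMS drivers
fetch the value themselves. A minimal sketch of the consumer side, assuming
only the drm_connector helper named in the TODO above:

    /*
     * Sketch: once .get_orientation is implemented, a KMS driver can ask
     * the connector to pull the orientation straight from the panel.
     * drm_connector_set_orientation_from_panel() invokes
     * panel->funcs->get_orientation() and attaches the result to the
     * connector's "panel orientation" property.
     */
    ret = drm_connector_set_orientation_from_panel(connector, panel);
    if (ret)
            return ret;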
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
new file mode 100644
index 000000000000..8d8813dbaa45
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Konrad Dybcio <konrad.dybcio@somainline.org>
+ *
+ * Generated with linux-mdss-dsi-panel-driver-generator with a
+ * substantial amount of manual adjustments.
+ *
+ * SONY Downstream kernel calls this one:
+ * - "JDI ID3" for Akari (XZ2)
+ * - "JDI ID4" for Apollo (XZ2 Compact)
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+enum {
+ TYPE_TAMA_60HZ,
+ /*
+ * Leaving room for expansion - SONY very often uses
+ * *truly reliably* overclockable panels on their flagships!
+ */
+};
+
+struct sony_td4353_jdi {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[3];
+ struct gpio_desc *panel_reset_gpio;
+ struct gpio_desc *touch_reset_gpio;
+ bool prepared;
+ int type;
+};
+
+static inline struct sony_td4353_jdi *to_sony_td4353_jdi(struct drm_panel *panel)
+{
+ return container_of(panel, struct sony_td4353_jdi, panel);
+}
+
+static int sony_td4353_jdi_on(struct sony_td4353_jdi *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_column_address(dsi, 0x0000, 1080 - 1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set column address: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0x0000, 2160 - 1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set page address: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear scanline: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear on: %d\n", ret);
+ return ret;
+ }
+
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set pixel format: %d\n", ret);
+ return ret;
+ }
+
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS,
+ 0x00, 0x00, 0x08, 0x6f);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(70);
+
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to turn display on: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sony_td4353_jdi_off(struct sony_td4353_jdi *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(22);
+
+ ret = mipi_dsi_dcs_set_tear_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set tear off: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(80);
+
+ return 0;
+}
+
+static void sony_td4353_assert_reset_gpios(struct sony_td4353_jdi *ctx, int mode)
+{
+ gpiod_set_value_cansleep(ctx->touch_reset_gpio, mode);
+ gpiod_set_value_cansleep(ctx->panel_reset_gpio, mode);
+ usleep_range(5000, 5100);
+}
+
+static int sony_td4353_jdi_prepare(struct drm_panel *panel)
+{
+ struct sony_td4353_jdi *ctx = to_sony_td4353_jdi(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ msleep(100);
+
+ sony_td4353_assert_reset_gpios(ctx, 1);
+
+ ret = sony_td4353_jdi_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power on panel: %d\n", ret);
+ sony_td4353_assert_reset_gpios(ctx, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int sony_td4353_jdi_unprepare(struct drm_panel *panel)
+{
+ struct sony_td4353_jdi *ctx = to_sony_td4353_jdi(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = sony_td4353_jdi_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to power off panel: %d\n", ret);
+
+ sony_td4353_assert_reset_gpios(ctx, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode sony_td4353_jdi_mode_tama_60hz = {
+ .clock = (1080 + 4 + 8 + 8) * (2160 + 259 + 8 + 8) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 4,
+ .hsync_end = 1080 + 4 + 8,
+ .htotal = 1080 + 4 + 8 + 8,
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 259,
+ .vsync_end = 2160 + 259 + 8,
+ .vtotal = 2160 + 259 + 8 + 8,
+ .width_mm = 64,
+ .height_mm = 128,
+};
+
+static int sony_td4353_jdi_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct sony_td4353_jdi *ctx = to_sony_td4353_jdi(panel);
+ struct drm_display_mode *mode = NULL;
+
+ if (ctx->type == TYPE_TAMA_60HZ)
+ mode = drm_mode_duplicate(connector->dev, &sony_td4353_jdi_mode_tama_60hz);
+ else
+ return -EINVAL;
+
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs sony_td4353_jdi_panel_funcs = {
+ .prepare = sony_td4353_jdi_prepare,
+ .unprepare = sony_td4353_jdi_unprepare,
+ .get_modes = sony_td4353_jdi_get_modes,
+};
+
+static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct sony_td4353_jdi *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->type = (uintptr_t)of_device_get_match_data(dev);
+
+ ctx->supplies[0].supply = "vddio";
+ ctx->supplies[1].supply = "vsp";
+ ctx->supplies[2].supply = "vsn";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->panel_reset_gpio = devm_gpiod_get(dev, "panel-reset", GPIOD_ASIS);
+ if (IS_ERR(ctx->panel_reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel_reset_gpio),
+ "Failed to get panel-reset-gpios\n");
+
+ ctx->touch_reset_gpio = devm_gpiod_get(dev, "touch-reset", GPIOD_ASIS);
+ if (IS_ERR(ctx->touch_reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->touch_reset_gpio),
+ "Failed to get touch-reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &sony_td4353_jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sony_td4353_jdi_remove(struct mipi_dsi_device *dsi)
+{
+ struct sony_td4353_jdi *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id sony_td4353_jdi_of_match[] = {
+ { .compatible = "sony,td4353-jdi-tama", .data = (void *)TYPE_TAMA_60HZ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sony_td4353_jdi_of_match);
+
+static struct mipi_dsi_driver sony_td4353_jdi_driver = {
+ .probe = sony_td4353_jdi_probe,
+ .remove = sony_td4353_jdi_remove,
+ .driver = {
+ .name = "panel-sony-td4353-jdi",
+ .of_match_table = sony_td4353_jdi_of_match,
+ },
+};
+module_mipi_dsi_driver(sony_td4353_jdi_driver);
+
+MODULE_AUTHOR("Konrad Dybcio <konrad.dybcio@somainline.org>");
+MODULE_DESCRIPTION("DRM panel driver for SONY Xperia XZ2/XZ2c JDI panel");
+MODULE_LICENSE("GPL");
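The new Sony panel driver keeps the entire power sequence behind the drm_panel
prepare/unprepare hooks, with ctx->prepared guarding against double
transitions. A rough sketch of the consumer-side flow, assuming only the
standard drm_panel API:

    /*
     * drm_panel_prepare() enables vddio/vsp/vsn, releases both reset
     * GPIOs and runs the DCS init sequence in sony_td4353_jdi_on();
     * drm_panel_unprepare() reverses it via sony_td4353_jdi_off().
     */
    ret = drm_panel_prepare(panel);
    if (ret)
            return ret;
    /* ... scan out frames ... */
    drm_panel_unprepare(panel);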
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index abb0dadd8f63..f49096f53141 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -220,15 +220,8 @@ panfrost_copy_in_sync(struct drm_device *dev,
}
for (i = 0; i < in_fence_count; i++) {
- struct dma_fence *fence;
-
- ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
- &fence);
- if (ret)
- goto fail;
-
- ret = drm_sched_job_add_dependency(&job->base, fence);
-
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
+ handles[i], 0);
if (ret)
goto fail;
}
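The two removed calls are exactly what the new helper folds together; based on
the code deleted above, drm_sched_job_add_syncobj_dependency() is roughly
equivalent to the following sketch (the real helper lives in the drm_sched
core):

    static int add_syncobj_dep(struct drm_sched_job *job,
                               struct drm_file *file_priv,
                               u32 handle, u64 point)
    {
            struct dma_fence *fence;
            int ret;

            ret = drm_syncobj_find_fence(file_priv, handle, point, 0, &fence);
            if (ret)
                    return ret;

            /* the scheduler takes over the fence reference */
            return drm_sched_job_add_dependency(job, fence);
    }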
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 00deba0b7271..4b2a9e9753f6 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -48,7 +48,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -308,7 +308,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
if (ret < 0)
goto dev_put;
- drm_fbdev_generic_setup(drm, priv->variant->fb_bpp);
+ drm_fbdev_dma_setup(drm, priv->variant->fb_bpp);
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index a92a5b0d4c25..1a82629bce3f 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -143,6 +143,17 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = bo->resource;
int ret;
+ if (!old_mem) {
+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_SYSTEM;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
qxl_bo_move_notify(bo, new_mem);
ret = ttm_bo_wait_ctx(bo, ctx);
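The new !old_mem branch covers buffer objects that have no resource assigned
yet: a move to TTM_PL_SYSTEM is a no-op (ttm_bo_move_null()), while anything
else is bounced through system memory via the multihop protocol. The generic
driver-side pattern, sketched with a hypothetical predicate:

    /*
     * If the transition cannot be done directly, describe the
     * intermediate placement in 'hop' and return -EMULTIHOP; the TTM
     * core then performs the move in two steps, calling ->move() again
     * for each leg. needs_bounce() is a placeholder for the driver's
     * own check.
     */
    if (needs_bounce(old_mem, new_mem)) {
            hop->mem_type = TTM_PL_SYSTEM;
            hop->flags = TTM_PL_FLAG_TEMPORARY;
            return -EMULTIHOP;
    }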
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 62a596d3a891..e19d77d58810 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -8,6 +8,7 @@ config DRM_RADEON
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
+ select DRM_SUBALLOC_HELPER
select DRM_TTM
select DRM_TTM_HELPER
select SND_HDA_COMPONENT if SND_HDA_CORE
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 57e20780a458..d19a4b1c1a8f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -79,6 +79,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_audio_component.h>
+#include <drm/drm_suballoc.h>
#include "radeon_family.h"
#include "radeon_mode.h"
@@ -511,52 +512,12 @@ struct radeon_bo {
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
-/* sub-allocation manager, it has to be protected by another lock.
- * By conception this is an helper for other part of the driver
- * like the indirect buffer or semaphore, which both have their
- * locking.
- *
- * Principe is simple, we keep a list of sub allocation in offset
- * order (first entry has offset == 0, last entry has the highest
- * offset).
- *
- * When allocating new object we first check if there is room at
- * the end total_size - (last_object_offset + last_object_size) >=
- * alloc_size. If so we allocate new object there.
- *
- * When there is not enough room at the end, we start waiting for
- * each sub object until we reach object_offset+object_size >=
- * alloc_size, this object then become the sub object we return.
- *
- * Alignment can't be bigger than page size.
- *
- * Hole are not considered for allocation to keep things simple.
- * Assumption is that there won't be hole (all object on same
- * alignment).
- */
struct radeon_sa_manager {
- wait_queue_head_t wq;
- struct radeon_bo *bo;
- struct list_head *hole;
- struct list_head flist[RADEON_NUM_RINGS];
- struct list_head olist;
- unsigned size;
- uint64_t gpu_addr;
- void *cpu_ptr;
- uint32_t domain;
- uint32_t align;
-};
-
-struct radeon_sa_bo;
-
-/* sub-allocation buffer */
-struct radeon_sa_bo {
- struct list_head olist;
- struct list_head flist;
- struct radeon_sa_manager *manager;
- unsigned soffset;
- unsigned eoffset;
- struct radeon_fence *fence;
+ struct drm_suballoc_manager base;
+ struct radeon_bo *bo;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
+ u32 domain;
};
/*
@@ -587,7 +548,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
* Semaphores.
*/
struct radeon_semaphore {
- struct radeon_sa_bo *sa_bo;
+ struct drm_suballoc *sa_bo;
signed waiters;
uint64_t gpu_addr;
};
@@ -816,7 +777,7 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
*/
struct radeon_ib {
- struct radeon_sa_bo *sa_bo;
+ struct drm_suballoc *sa_bo;
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 62b116727b4f..6a45a72488f9 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
{
int r;
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
+ r = radeon_sa_bo_new(&rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
@@ -77,7 +77,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
* space and soffset is the offset inside the pool bo
*/
- ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+ ib->gpu_addr = drm_suballoc_soffset(ib->sa_bo) + RADEON_VA_IB_OFFSET;
} else {
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
@@ -97,7 +97,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
radeon_sync_free(rdev, &ib->sync, ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_sa_bo_free(&ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}
@@ -201,8 +201,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->family >= CHIP_BONAIRE) {
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
- RADEON_IB_POOL_SIZE*64*1024,
- RADEON_GPU_PAGE_SIZE,
+ RADEON_IB_POOL_SIZE*64*1024, 256,
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_GTT_WC);
} else {
@@ -210,8 +209,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
* to the command stream checking
*/
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
- RADEON_IB_POOL_SIZE*64*1024,
- RADEON_GPU_PAGE_SIZE,
+ RADEON_IB_POOL_SIZE*64*1024, 256,
RADEON_GEM_DOMAIN_GTT, 0);
}
if (r) {
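Note the alignment split introduced here: the backing buffer object is still
created page-aligned (see radeon_sa_bo_manager_init() below, which now passes
RADEON_GPU_PAGE_SIZE to radeon_bo_create()), while individual IB suballocations
only request 256-byte alignment. Taken together, the converted calls look like:

    /* pool BO is page-aligned; each IB suballocation is 256-byte aligned */
    r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
                                  RADEON_IB_POOL_SIZE * 64 * 1024, 256,
                                  RADEON_GEM_DOMAIN_GTT, RADEON_GEM_GTT_WC);
    ...
    r = radeon_sa_bo_new(&rdev->ring_tmp_bo, &ib->sa_bo, size, 256);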
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 0a6ef49e990a..39cc87a59a9a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -169,15 +169,22 @@ extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
/*
* sub allocation
*/
+static inline struct radeon_sa_manager *
+to_radeon_sa_manager(struct drm_suballoc_manager *manager)
+{
+ return container_of(manager, struct radeon_sa_manager, base);
+}
-static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+static inline uint64_t radeon_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->gpu_addr + sa_bo->soffset;
+ return to_radeon_sa_manager(sa_bo->manager)->gpu_addr +
+ drm_suballoc_soffset(sa_bo);
}
-static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+static inline void *radeon_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+ return to_radeon_sa_manager(sa_bo->manager)->cpu_ptr +
+ drm_suballoc_soffset(sa_bo);
}
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
@@ -190,12 +197,10 @@ extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
-extern int radeon_sa_bo_new(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align);
-extern void radeon_sa_bo_free(struct radeon_device *rdev,
- struct radeon_sa_bo **sa_bo,
+extern int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager,
+ struct drm_suballoc **sa_bo,
+ unsigned int size, unsigned int align);
+extern void radeon_sa_bo_free(struct drm_suballoc **sa_bo,
struct radeon_fence *fence);
#if defined(CONFIG_DEBUG_FS)
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 0981948bd9ed..c87a57c9c592 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -44,53 +44,32 @@
#include "radeon.h"
-static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
-static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
-
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain, u32 flags)
+ unsigned int size, u32 sa_align, u32 domain,
+ u32 flags)
{
- int i, r;
-
- init_waitqueue_head(&sa_manager->wq);
- sa_manager->bo = NULL;
- sa_manager->size = size;
- sa_manager->domain = domain;
- sa_manager->align = align;
- sa_manager->hole = &sa_manager->olist;
- INIT_LIST_HEAD(&sa_manager->olist);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- INIT_LIST_HEAD(&sa_manager->flist[i]);
- }
+ int r;
- r = radeon_bo_create(rdev, size, align, true,
+ r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
domain, flags, NULL, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
+ sa_manager->domain = domain;
+
+ drm_suballoc_manager_init(&sa_manager->base, size, sa_align);
+
return r;
}
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager)
{
- struct radeon_sa_bo *sa_bo, *tmp;
-
- if (!list_empty(&sa_manager->olist)) {
- sa_manager->hole = &sa_manager->olist,
- radeon_sa_bo_try_free(sa_manager);
- if (!list_empty(&sa_manager->olist)) {
- dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
- }
- }
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
- radeon_sa_bo_remove_locked(sa_bo);
- }
+ drm_suballoc_manager_fini(&sa_manager->base);
radeon_bo_unref(&sa_manager->bo);
- sa_manager->size = 0;
}
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
@@ -139,260 +118,34 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
return r;
}
-static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager,
+ struct drm_suballoc **sa_bo,
+ unsigned int size, unsigned int align)
{
- struct radeon_sa_manager *sa_manager = sa_bo->manager;
- if (sa_manager->hole == &sa_bo->olist) {
- sa_manager->hole = sa_bo->olist.prev;
- }
- list_del_init(&sa_bo->olist);
- list_del_init(&sa_bo->flist);
- radeon_fence_unref(&sa_bo->fence);
- kfree(sa_bo);
-}
-
-static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
-{
- struct radeon_sa_bo *sa_bo, *tmp;
-
- if (sa_manager->hole->next == &sa_manager->olist)
- return;
+ struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
+ GFP_KERNEL, true, align);
- sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
- list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
- if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
- return;
- }
- radeon_sa_bo_remove_locked(sa_bo);
+ if (IS_ERR(sa)) {
+ *sa_bo = NULL;
+ return PTR_ERR(sa);
}
-}
-static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole != &sa_manager->olist) {
- return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
- }
+ *sa_bo = sa;
return 0;
}
-static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole->next != &sa_manager->olist) {
- return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
- }
- return sa_manager->size;
-}
-
-static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
-
- soffset = radeon_sa_bo_hole_soffset(sa_manager);
- eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- soffset += wasted;
-
- sa_bo->manager = sa_manager;
- sa_bo->soffset = soffset;
- sa_bo->eoffset = soffset + size;
- list_add(&sa_bo->olist, sa_manager->hole);
- INIT_LIST_HEAD(&sa_bo->flist);
- sa_manager->hole = &sa_bo->olist;
- return true;
- }
- return false;
-}
-
-/**
- * radeon_sa_event - Check if we can stop waiting
- *
- * @sa_manager: pointer to the sa_manager
- * @size: number of bytes we want to allocate
- * @align: alignment we need to match
- *
- * Check if either there is a fence we can wait for or
- * enough free memory to satisfy the allocation directly
- */
-static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
- int i;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!list_empty(&sa_manager->flist[i])) {
- return true;
- }
- }
-
- soffset = radeon_sa_bo_hole_soffset(sa_manager);
- eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- return true;
- }
-
- return false;
-}
-
-static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
- struct radeon_fence **fences,
- unsigned *tries)
-{
- struct radeon_sa_bo *best_bo = NULL;
- unsigned i, soffset, best, tmp;
-
- /* if hole points to the end of the buffer */
- if (sa_manager->hole->next == &sa_manager->olist) {
- /* try again with its beginning */
- sa_manager->hole = &sa_manager->olist;
- return true;
- }
-
- soffset = radeon_sa_bo_hole_soffset(sa_manager);
- /* to handle wrap around we add sa_manager->size */
- best = sa_manager->size * 2;
- /* go over all fence list and try to find the closest sa_bo
- * of the current last
- */
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_sa_bo *sa_bo;
-
- fences[i] = NULL;
-
- if (list_empty(&sa_manager->flist[i])) {
- continue;
- }
-
- sa_bo = list_first_entry(&sa_manager->flist[i],
- struct radeon_sa_bo, flist);
-
- if (!radeon_fence_signaled(sa_bo->fence)) {
- fences[i] = sa_bo->fence;
- continue;
- }
-
- /* limit the number of tries each ring gets */
- if (tries[i] > 2) {
- continue;
- }
-
- tmp = sa_bo->soffset;
- if (tmp < soffset) {
- /* wrap around, pretend it's after */
- tmp += sa_manager->size;
- }
- tmp -= soffset;
- if (tmp < best) {
- /* this sa bo is the closest one */
- best = tmp;
- best_bo = sa_bo;
- }
- }
-
- if (best_bo) {
- ++tries[best_bo->fence->ring];
- sa_manager->hole = best_bo->olist.prev;
-
- /* we knew that this one is signaled,
- so it's safe to remove it */
- radeon_sa_bo_remove_locked(best_bo);
- return true;
- }
- return false;
-}
-
-int radeon_sa_bo_new(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align)
-{
- struct radeon_fence *fences[RADEON_NUM_RINGS];
- unsigned tries[RADEON_NUM_RINGS];
- int i, r;
-
- BUG_ON(align > sa_manager->align);
- BUG_ON(size > sa_manager->size);
-
- *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
- if ((*sa_bo) == NULL) {
- return -ENOMEM;
- }
- (*sa_bo)->manager = sa_manager;
- (*sa_bo)->fence = NULL;
- INIT_LIST_HEAD(&(*sa_bo)->olist);
- INIT_LIST_HEAD(&(*sa_bo)->flist);
-
- spin_lock(&sa_manager->wq.lock);
- do {
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- tries[i] = 0;
-
- do {
- radeon_sa_bo_try_free(sa_manager);
-
- if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
- size, align)) {
- spin_unlock(&sa_manager->wq.lock);
- return 0;
- }
-
- /* see if we can skip over some allocations */
- } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- radeon_fence_ref(fences[i]);
-
- spin_unlock(&sa_manager->wq.lock);
- r = radeon_fence_wait_any(rdev, fences, false);
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- radeon_fence_unref(&fences[i]);
- spin_lock(&sa_manager->wq.lock);
- /* if we have nothing to wait for block */
- if (r == -ENOENT) {
- r = wait_event_interruptible_locked(
- sa_manager->wq,
- radeon_sa_event(sa_manager, size, align)
- );
- }
-
- } while (!r);
-
- spin_unlock(&sa_manager->wq.lock);
- kfree(*sa_bo);
- *sa_bo = NULL;
- return r;
-}
-
-void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+void radeon_sa_bo_free(struct drm_suballoc **sa_bo,
struct radeon_fence *fence)
{
- struct radeon_sa_manager *sa_manager;
-
if (sa_bo == NULL || *sa_bo == NULL) {
return;
}
- sa_manager = (*sa_bo)->manager;
- spin_lock(&sa_manager->wq.lock);
- if (fence && !radeon_fence_signaled(fence)) {
- (*sa_bo)->fence = radeon_fence_ref(fence);
- list_add_tail(&(*sa_bo)->flist,
- &sa_manager->flist[fence->ring]);
- } else {
- radeon_sa_bo_remove_locked(*sa_bo);
- }
- wake_up_all_locked(&sa_manager->wq);
- spin_unlock(&sa_manager->wq.lock);
+ if (fence)
+ drm_suballoc_free(*sa_bo, &fence->base);
+ else
+ drm_suballoc_free(*sa_bo, NULL);
+
*sa_bo = NULL;
}
@@ -400,25 +153,8 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
struct seq_file *m)
{
- struct radeon_sa_bo *i;
+ struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&sa_manager->wq.lock);
- list_for_each_entry(i, &sa_manager->olist, olist) {
- uint64_t soffset = i->soffset + sa_manager->gpu_addr;
- uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
- if (&i->olist == sa_manager->hole) {
- seq_printf(m, ">");
- } else {
- seq_printf(m, " ");
- }
- seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
- soffset, eoffset, eoffset - soffset);
- if (i->fence) {
- seq_printf(m, " protected by 0x%016llx on ring %d",
- i->fence->seq, i->fence->ring);
- }
- seq_printf(m, "\n");
- }
- spin_unlock(&sa_manager->wq.lock);
+ drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif
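With this conversion the whole wait/retry machinery (hole tracking, per-ring
fence lists, the wait queue) moves into the shared DRM suballocation helper;
radeon keeps only the backing-BO bookkeeping. The resulting API surface, as
exercised by the code above:

    drm_suballoc_manager_init(&sa_manager->base, size, sa_align);

    struct drm_suballoc *sa =
            drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL,
                             true /* interruptible wait */, align);
    if (!IS_ERR(sa)) {
            size_t offset = drm_suballoc_soffset(sa); /* offset inside the BO */
            /* ... use the range, then release it once 'fence' signals: */
            drm_suballoc_free(sa, &fence->base);
    }

    drm_suballoc_manager_fini(&sa_manager->base);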
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 221e59476f64..1f0a9a4ff5ae 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -40,7 +40,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
if (*semaphore == NULL) {
return -ENOMEM;
}
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+ r = radeon_sa_bo_new(&rdev->ring_tmp_bo,
&(*semaphore)->sa_bo, 8, 8);
if (r) {
kfree(*semaphore);
@@ -100,7 +100,7 @@ void radeon_semaphore_free(struct radeon_device *rdev,
dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
" hardware lockup imminent!\n", *semaphore);
}
- radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
+ radeon_sa_bo_free(&(*semaphore)->sa_bo, fence);
kfree(*semaphore);
*semaphore = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1e8e287e113c..2220cdf6a3f6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -211,13 +211,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r)
return r;
- /* Can't move a pinned BO */
rbo = container_of(bo, struct radeon_bo, tbo);
- if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
- return -EINVAL;
-
rdev = radeon_get_rdev(bo->bdev);
- if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
+ bo->ttm == NULL)) {
ttm_bo_move_null(bo, new_mem);
goto out;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 008e172ed43b..d6d29be6b4f4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -298,13 +298,26 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
escr = params.escr;
}
- if (rcdu->info->gen < 4) {
+ /*
+ * The ESCR register only exists in DU channels that can output to an
+ * LVDS or DPAD, and the OTAR register in DU channels that can output
+ * to a DPAD.
+ */
+ if ((rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs |
+ rcdu->info->routes[RCAR_DU_OUTPUT_DPAD1].possible_crtcs |
+ rcdu->info->routes[RCAR_DU_OUTPUT_LVDS0].possible_crtcs |
+ rcdu->info->routes[RCAR_DU_OUTPUT_LVDS1].possible_crtcs) &
+ BIT(rcrtc->index)) {
dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
- rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
}
+ if ((rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs |
+ rcdu->info->routes[RCAR_DU_OUTPUT_DPAD1].possible_crtcs) &
+ BIT(rcrtc->index))
+ rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
+
/* Signal polarities */
dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
| ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
@@ -749,16 +762,17 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
/*
* On D3/E3 the dot clock is provided by the LVDS encoder attached to
- * the DU channel. We need to enable its clock output explicitly if
- * the LVDS output is disabled.
+ * the DU channel. We need to enable its clock output explicitly before
+ * starting the CRTC, as the bridge hasn't been enabled by the atomic
+ * helpers yet.
*/
- if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
- rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
+ if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
+ bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0);
struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
- rcar_lvds_pclk_enable(bridge, mode->clock * 1000);
+ rcar_lvds_pclk_enable(bridge, mode->clock * 1000, dot_clk_only);
}
/*
@@ -795,15 +809,16 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
- if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
- rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
+ if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
+ bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0);
struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
/*
* Disable the LVDS clock output, see
- * rcar_du_crtc_atomic_enable().
+ * rcar_du_crtc_atomic_enable(). When the LVDS output is used,
+ * this also disables the LVDS encoder.
*/
- rcar_lvds_pclk_disable(bridge);
+ rcar_lvds_pclk_disable(bridge, dot_clk_only);
}
if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
@@ -815,7 +830,6 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
* Disable the DSI clock output, see
* rcar_du_crtc_atomic_enable().
*/
-
rcar_mipi_dsi_pclk_disable(bridge);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index b1787be31e92..7ecec7b04a8d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -109,8 +109,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
&rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
NULL);
- if (!renc)
- return -ENOMEM;
+ if (IS_ERR(renc))
+ return PTR_ERR(renc);
renc->output = output;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 152602236377..2ccd2581f544 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -138,6 +138,7 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
{
struct rcar_du_device *rcdu = rgrp->dev;
u32 defr7 = DEFR7_CODE;
+ u32 dorcr;
/* Enable extended features */
rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
@@ -174,8 +175,15 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
/*
* Use DS1PR and DS2PR to configure planes priorities and connects the
* superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
+ *
+ * Groups that have a single channel have a hardcoded configuration. On
+ * Gen3 and newer, the documentation requires PG1T, DK1S and PG1D_DS1 to
+ * always be set in this case.
*/
- rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
+ dorcr = DORCR_PG0D_DS0 | DORCR_DPRS;
+ if (rcdu->info->gen >= 3 && rgrp->num_crtcs == 1)
+ dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1;
+ rcar_du_group_write(rgrp, DORCR, dorcr);
/* Apply planes to CRTCs association. */
mutex_lock(&rgrp->lock);
@@ -349,7 +357,7 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
struct rcar_du_device *rcdu = rgrp->dev;
u32 dorcr = rcar_du_group_read(rgrp, DORCR);
- dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
+ dorcr &= ~(DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_MASK);
/*
* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and
@@ -357,9 +365,9 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
* by default.
*/
if (rcdu->dpad1_source == rgrp->index * 2)
- dorcr |= DORCR_PG2D_DS1;
+ dorcr |= DORCR_PG1D_DS0;
else
- dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
+ dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1;
rcar_du_group_write(rgrp, DORCR, dorcr);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 789ae9285108..6c750fab6ebb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -511,19 +511,19 @@
*/
#define DORCR 0x11000
-#define DORCR_PG2T (1 << 30)
-#define DORCR_DK2S (1 << 28)
-#define DORCR_PG2D_DS1 (0 << 24)
-#define DORCR_PG2D_DS2 (1 << 24)
-#define DORCR_PG2D_FIX0 (2 << 24)
-#define DORCR_PG2D_DOOR (3 << 24)
-#define DORCR_PG2D_MASK (3 << 24)
-#define DORCR_DR1D (1 << 21)
-#define DORCR_PG1D_DS1 (0 << 16)
-#define DORCR_PG1D_DS2 (1 << 16)
-#define DORCR_PG1D_FIX0 (2 << 16)
-#define DORCR_PG1D_DOOR (3 << 16)
-#define DORCR_PG1D_MASK (3 << 16)
+#define DORCR_PG1T (1 << 30)
+#define DORCR_DK1S (1 << 28)
+#define DORCR_PG1D_DS0 (0 << 24)
+#define DORCR_PG1D_DS1 (1 << 24)
+#define DORCR_PG1D_FIX0 (2 << 24)
+#define DORCR_PG1D_DOOR (3 << 24)
+#define DORCR_PG1D_MASK (3 << 24)
+#define DORCR_DR0D (1 << 21)
+#define DORCR_PG0D_DS0 (0 << 16)
+#define DORCR_PG0D_DS1 (1 << 16)
+#define DORCR_PG0D_FIX0 (2 << 16)
+#define DORCR_PG0D_DOOR (3 << 16)
+#define DORCR_PG0D_MASK (3 << 16)
#define DORCR_RGPV (1 << 4)
#define DORCR_DPRS (1 << 0)
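The DORCR rename is purely a renumbering: superposition groups and display
sources are now zero-indexed (PG2 -> PG1, PG1 -> PG0, DS1 -> DS0, DS2 -> DS1)
to match the hardware documentation, with the encoded bit positions left
untouched. The routing code in rcar_du_group.c above therefore writes the same
register values as before:

    /* same bits as before, under the zero-indexed names */
    dorcr &= ~(DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_MASK); /* was PG2*, 3 << 24 */
    dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1;     /* was PG2D_DS2, 1 << 24 */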
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index fe90be51d64e..45c05d0ffc70 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -73,7 +73,7 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
.src.y2 = mode->vdisplay << 16,
.zpos = 0,
},
- .format = rcar_du_format_info(DRM_FORMAT_ARGB8888),
+ .format = rcar_du_format_info(DRM_FORMAT_XRGB8888),
.source = RCAR_DU_PLANE_VSPD1,
.colorkey = 0,
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 260ea5d8624e..ca215b588fd7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -269,8 +269,8 @@ done:
pll->pll_m, pll->pll_n, pll->pll_e, pll->div);
}
-static void __rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds,
- unsigned int freq, bool dot_clock_only)
+static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds,
+ unsigned int freq, bool dot_clock_only)
{
struct pll_info pll = { .diff = (unsigned long)-1 };
u32 lvdpllcr;
@@ -305,52 +305,8 @@ static void __rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds,
rcar_lvds_write(lvds, LVDDIV, 0);
}
-static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq)
-{
- __rcar_lvds_pll_setup_d3_e3(lvds, freq, false);
-}
-
/* -----------------------------------------------------------------------------
- * Clock - D3/E3 only
- */
-
-int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq)
-{
- struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
- int ret;
-
- if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
- return -ENODEV;
-
- dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq);
-
- ret = pm_runtime_resume_and_get(lvds->dev);
- if (ret)
- return ret;
-
- __rcar_lvds_pll_setup_d3_e3(lvds, freq, true);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable);
-
-void rcar_lvds_pclk_disable(struct drm_bridge *bridge)
-{
- struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
-
- if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
- return;
-
- dev_dbg(lvds->dev, "disabling LVDS PLL\n");
-
- rcar_lvds_write(lvds, LVDPLLCR, 0);
-
- pm_runtime_put_sync(lvds->dev);
-}
-EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
-
-/* -----------------------------------------------------------------------------
- * Bridge
+ * Enable/disable
*/
static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds,
@@ -394,10 +350,10 @@ static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds,
return mode;
}
-static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- struct drm_connector *connector)
+static void rcar_lvds_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
u32 lvdhcr;
@@ -410,8 +366,7 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
/* Enable the companion LVDS encoder in dual-link mode. */
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
- __rcar_lvds_atomic_enable(lvds->companion, state, crtc,
- connector);
+ rcar_lvds_enable(lvds->companion, state, crtc, connector);
/*
* Hardcode the channels and control signals routing for now.
@@ -465,8 +420,12 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
/*
* PLL clock configuration on all instances but the companion in
* dual-link mode.
+ *
+ * The extended PLL has been turned on by an explicit call to
+ * rcar_lvds_pclk_enable() from the DU driver.
*/
- if (lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) {
+ if ((lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) &&
+ !(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
const struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state, crtc);
const struct drm_display_mode *mode =
@@ -531,22 +490,7 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
-static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
-{
- struct drm_atomic_state *state = old_bridge_state->base.state;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
-
- connector = drm_atomic_get_new_connector_for_encoder(state,
- bridge->encoder);
- crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
-
- __rcar_lvds_atomic_enable(bridge, state, crtc, connector);
-}
-
-static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+static void rcar_lvds_disable(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
u32 lvdcr0;
@@ -578,15 +522,99 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
rcar_lvds_write(lvds, LVDCR0, 0);
rcar_lvds_write(lvds, LVDCR1, 0);
- rcar_lvds_write(lvds, LVDPLLCR, 0);
+
+ /* The extended PLL is turned off in rcar_lvds_pclk_disable(). */
+ if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL))
+ rcar_lvds_write(lvds, LVDPLLCR, 0);
/* Disable the companion LVDS encoder in dual-link mode. */
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
- lvds->companion->funcs->atomic_disable(lvds->companion,
- old_bridge_state);
+ rcar_lvds_disable(lvds->companion);
+
+ pm_runtime_put_sync(lvds->dev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Clock - D3/E3 only
+ */
+
+int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq,
+ bool dot_clk_only)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+ int ret;
+
+ if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
+ return -ENODEV;
+
+ dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq);
+
+ ret = pm_runtime_resume_and_get(lvds->dev);
+ if (ret)
+ return ret;
+
+ rcar_lvds_pll_setup_d3_e3(lvds, freq, dot_clk_only);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable);
+
+void rcar_lvds_pclk_disable(struct drm_bridge *bridge, bool dot_clk_only)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+ if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)))
+ return;
+
+ dev_dbg(lvds->dev, "disabling LVDS PLL\n");
+
+ if (!dot_clk_only)
+ rcar_lvds_disable(bridge);
+
+ rcar_lvds_write(lvds, LVDPLLCR, 0);
pm_runtime_put_sync(lvds->dev);
}
+EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
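The dot_clk_only flag encodes the D3/E3 split of responsibilities: the DU
driver always enables the LVDS PLL before starting the CRTC, and when the LVDS
output itself is in use (dot_clk_only == false), rcar_lvds_pclk_disable() also
tears down the encoder that rcar_lvds_atomic_disable() deliberately leaves
running. The DU-side pattern, taken from the rcar_du_crtc.c hunks earlier in
this patch:

    bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0);

    rcar_lvds_pclk_enable(bridge, mode->clock * 1000, dot_clk_only);
    /* ... CRTC running ... */
    rcar_lvds_pclk_disable(bridge, dot_clk_only);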
+
+/* -----------------------------------------------------------------------------
+ * Bridge
+ */
+
+static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct drm_atomic_state *state = old_bridge_state->base.state;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+
+ rcar_lvds_enable(bridge, state, crtc, connector);
+}
+
+static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+
+ /*
+ * For D3 and E3, disabling the LVDS encoder before the DU would stall
+ * the DU, causing a vblank wait timeout when stopping the DU. This has
+ * been traced to clearing the LVEN bit, but the exact reason is
+ * unknown. Keep the encoder enabled, it will be disabled by an explicit
+ * call to rcar_lvds_pclk_disable() from the DU driver.
+ *
+ * We could clear the LVRES bit already to disable the LVDS output, but
+ * that's likely pointless.
+ */
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)
+ return;
+
+ rcar_lvds_disable(bridge);
+}
static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
@@ -922,14 +950,12 @@ static const struct rcar_lvds_device_info rcar_lvds_r8a77990_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_GEN3_LVEN | RCAR_LVDS_QUIRK_EXT_PLL
| RCAR_LVDS_QUIRK_DUAL_LINK,
- .pll_setup = rcar_lvds_pll_setup_d3_e3,
};
static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_GEN3_LVEN | RCAR_LVDS_QUIRK_PWD
| RCAR_LVDS_QUIRK_EXT_PLL | RCAR_LVDS_QUIRK_DUAL_LINK,
- .pll_setup = rcar_lvds_pll_setup_d3_e3,
};
static const struct of_device_id rcar_lvds_of_table[] = {
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.h b/drivers/gpu/drm/rcar-du/rcar_lvds.h
index bee7033b60d6..887c63500000 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.h
@@ -13,17 +13,21 @@
struct drm_bridge;
#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
-int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq);
-void rcar_lvds_pclk_disable(struct drm_bridge *bridge);
+int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq,
+ bool dot_clk_only);
+void rcar_lvds_pclk_disable(struct drm_bridge *bridge, bool dot_clk_only);
bool rcar_lvds_dual_link(struct drm_bridge *bridge);
bool rcar_lvds_is_connected(struct drm_bridge *bridge);
#else
static inline int rcar_lvds_pclk_enable(struct drm_bridge *bridge,
- unsigned long freq)
+ unsigned long freq, bool dot_clk_only)
{
return -ENOSYS;
}
-static inline void rcar_lvds_pclk_disable(struct drm_bridge *bridge) { }
+static inline void rcar_lvds_pclk_disable(struct drm_bridge *bridge,
+ bool dot_clock_only)
+{
+}
static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
{
return false;
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 7901c3babc8c..917e79951aac 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -359,11 +359,6 @@ static inline void dsi_write(struct dw_mipi_dsi_rockchip *dsi, u32 reg, u32 val)
writel(val, dsi->base + reg);
}
-static inline u32 dsi_read(struct dw_mipi_dsi_rockchip *dsi, u32 reg)
-{
- return readl(dsi->base + reg);
-}
-
static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi_rockchip *dsi,
u8 test_code,
u8 test_data)
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 2f4b8f64cbad..112699949db9 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -74,6 +74,7 @@ struct rockchip_hdmi {
struct regmap *regmap;
struct rockchip_encoder encoder;
const struct rockchip_hdmi_chip_data *chip_data;
+ const struct dw_hdmi_plat_data *plat_data;
struct clk *ref_clk;
struct clk *grf_clk;
struct dw_hdmi *hdmi;
@@ -161,6 +162,12 @@ static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = {
{ 0x4064, 0x0003}
},
}, {
+ 340000000, {
+ { 0x0040, 0x0003 },
+ { 0x3b4c, 0x0003 },
+ { 0x5a64, 0x0003 },
+ },
+ }, {
~0UL, {
{ 0x00a0, 0x000a },
{ 0x2001, 0x000f },
@@ -186,6 +193,8 @@ static const struct dw_hdmi_curr_ctrl rockchip_cur_ctr[] = {
}, {
148500000, { 0x0000, 0x0038, 0x0038 },
}, {
+ 600000000, { 0x0000, 0x0000, 0x0000 },
+ }, {
~0UL, { 0x0000, 0x0000, 0x0000},
}
};
@@ -241,23 +250,39 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
}
static enum drm_mode_status
-dw_hdmi_rockchip_mode_valid(struct dw_hdmi *hdmi, void *data,
+dw_hdmi_rockchip_mode_valid(struct dw_hdmi *dw_hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
+ struct rockchip_hdmi *hdmi = data;
const struct dw_hdmi_mpll_config *mpll_cfg = rockchip_mpll_cfg;
int pclk = mode->clock * 1000;
- bool valid = false;
+ bool exact_match = hdmi->plat_data->phy_force_vendor;
int i;
+ if (hdmi->ref_clk) {
+ int rpclk = clk_round_rate(hdmi->ref_clk, pclk);
+
+ if (abs(rpclk - pclk) > pclk / 1000)
+ return MODE_NOCLOCK;
+ }
+
for (i = 0; mpll_cfg[i].mpixelclock != (~0UL); i++) {
- if (pclk == mpll_cfg[i].mpixelclock) {
- valid = true;
- break;
- }
+ /*
+ * For vendor-specific phys, force an exact match of the pixelclock
+ * to preserve the original behaviour of the driver.
+ */
+ if (exact_match && pclk == mpll_cfg[i].mpixelclock)
+ return MODE_OK;
+ /*
+ * The Synopsys phy can work with pixelclocks up to the value given
+ * in the corresponding mpll_cfg entry.
+ */
+ if (!exact_match && pclk <= mpll_cfg[i].mpixelclock)
+ return MODE_OK;
}
- return (valid) ? MODE_OK : MODE_BAD;
+ return MODE_BAD;
}
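The new clk_round_rate() check rejects modes whose pixel clock the SoC cannot
generate to within 0.1% (pclk / 1000). Worked through with hypothetical
numbers:

    long pclk = 148500000;                            /* 148.5 MHz mode */
    long rpclk = clk_round_rate(hdmi->ref_clk, pclk); /* e.g. 148350000 */

    /* 150 kHz off > 148.5 kHz allowed -> reject with MODE_NOCLOCK */
    if (abs(rpclk - pclk) > pclk / 1000)
            return MODE_NOCLOCK;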
static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
@@ -546,8 +571,10 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return -ENOMEM;
hdmi->dev = &pdev->dev;
+ hdmi->plat_data = plat_data;
hdmi->chip_data = plat_data->phy_data;
plat_data->phy_data = hdmi;
+ plat_data->priv_data = hdmi;
encoder = &hdmi->encoder.encoder;
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -640,6 +667,7 @@ static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
struct rockchip_hdmi *hdmi = dev_get_drvdata(dev);
dw_hdmi_unbind(hdmi->hdmi);
+ drm_encoder_cleanup(&hdmi->encoder.encoder);
clk_disable_unprepare(hdmi->ref_clk);
regulator_disable(hdmi->avdd_1v8);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 8ea09d915c3c..b8f8b45ebf59 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -261,9 +261,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
else
ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
- if (ret)
- drm_gem_vm_close(vma);
-
return ret;
}
@@ -518,8 +515,14 @@ int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
if (rk_obj->pages) {
- void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
- pgprot_writecombine(PAGE_KERNEL));
+ void *vaddr;
+
+ if (rk_obj->kvaddr)
+ vaddr = rk_obj->kvaddr;
+ else
+ vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+
if (!vaddr)
return -ENOMEM;
iosys_map_set_vaddr(map, vaddr);
@@ -539,7 +542,8 @@ void rockchip_gem_prime_vunmap(struct drm_gem_object *obj,
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
if (rk_obj->pages) {
- vunmap(map->vaddr);
+ if (map->vaddr != rk_obj->kvaddr)
+ vunmap(map->vaddr);
return;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fa1f4ee6d195..d8f5e064a1ba 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -316,13 +316,10 @@ static int vop_convert_afbc_format(uint32_t format)
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
return AFBC_FMT_RGB565;
- /* either of the below should not be reachable */
default:
- DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format);
+ DRM_DEBUG_KMS("unsupported AFBC format[%08x]\n", format);
return -EINVAL;
}
-
- return -EINVAL;
}
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
@@ -1174,6 +1171,17 @@ static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&vop->irq_lock, flags);
}
+static enum drm_mode_status vop_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct vop *vop = to_vop(crtc);
+
+ if (vop->data->max_output.width && mode->hdisplay > vop->data->max_output.width)
+ return MODE_BAD_HVALUE;
+
+ return MODE_OK;
+}
+
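The .mode_valid hook lets the DRM core prune modes wider than the VOP can scan
out before they reach userspace. The limit comes from the new max_output field
in struct vop_data (declared further down in this patch); a hypothetical SoC
entry would populate it like this, with the width value purely illustrative:

    static const struct vop_data example_vop = {
            /* ... existing fields ... */
            .max_output = { .width = 3840, .height = 2160 },
    };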
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -1585,6 +1593,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
+ .mode_valid = vop_crtc_mode_valid,
.mode_fixup = vop_crtc_mode_fixup,
.atomic_check = vop_crtc_atomic_check,
.atomic_begin = vop_crtc_atomic_begin,
@@ -2221,7 +2230,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
goto err_disable_pm_runtime;
if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) {
- vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev);
+ vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev, 0);
if (IS_ERR(vop->rgb)) {
ret = PTR_ERR(vop->rgb);
goto err_disable_pm_runtime;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 8502849833d9..5f56e0597df8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -42,6 +42,11 @@ enum vop_data_format {
VOP_FMT_YUV444SP,
};
+struct vop_rect {
+ int width;
+ int height;
+};
+
struct vop_reg {
uint32_t mask;
uint16_t offset;
@@ -225,6 +230,7 @@ struct vop_data {
const struct vop_win_data *win;
unsigned int win_size;
unsigned int lut_size;
+ struct vop_rect max_output;
#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
#define VOP_FEATURE_INTERNAL_RGB BIT(1)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index ba3b81789509..38554ca2fc39 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -38,6 +38,7 @@
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_vop2.h"
+#include "rockchip_rgb.h"
/*
* VOP2 architecture
@@ -211,6 +212,9 @@ struct vop2 {
struct clk *hclk;
struct clk *aclk;
+ /* optional internal rgb encoder */
+ struct rockchip_rgb *rgb;
+
/* must be put at the end of the struct */
struct vop2_win win[];
};
@@ -1434,6 +1438,8 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
die &= ~RK3568_SYS_DSP_INFACE_EN_RGB_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_RGB |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_RGB_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
if (polflags & POLFLAG_DCLK_INV)
regmap_write(vop2->grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3));
else
@@ -2245,7 +2251,7 @@ static struct vop2_video_port *find_vp_without_primary(struct vop2 *vop2)
#define NR_LAYERS 6
-static int vop2_create_crtc(struct vop2 *vop2)
+static int vop2_create_crtcs(struct vop2 *vop2)
{
const struct vop2_data *vop2_data = vop2->data;
struct drm_device *drm = vop2->drm;
@@ -2295,7 +2301,7 @@ static int vop2_create_crtc(struct vop2 *vop2)
nvp = 0;
for (i = 0; i < vop2->registered_num_wins; i++) {
struct vop2_win *win = &vop2->win[i];
- u32 possible_crtcs;
+ u32 possible_crtcs = 0;
if (vop2->data->soc_id == 3566) {
/*
@@ -2370,15 +2376,44 @@ static int vop2_create_crtc(struct vop2 *vop2)
return 0;
}
-static void vop2_destroy_crtc(struct drm_crtc *crtc)
+static void vop2_destroy_crtcs(struct vop2 *vop2)
{
- of_node_put(crtc->port);
+ struct drm_device *drm = vop2->drm;
+ struct list_head *crtc_list = &drm->mode_config.crtc_list;
+ struct list_head *plane_list = &drm->mode_config.plane_list;
+ struct drm_crtc *crtc, *tmpc;
+ struct drm_plane *plane, *tmpp;
+
+ list_for_each_entry_safe(plane, tmpp, plane_list, head)
+ drm_plane_cleanup(plane);
/*
* Destroy CRTC after vop2_plane_destroy() since vop2_disable_plane()
* references the CRTC.
*/
- drm_crtc_cleanup(crtc);
+ list_for_each_entry_safe(crtc, tmpc, crtc_list, head) {
+ of_node_put(crtc->port);
+ drm_crtc_cleanup(crtc);
+ }
+}
+
+static int vop2_find_rgb_encoder(struct vop2 *vop2)
+{
+ struct device_node *node = vop2->dev->of_node;
+ struct device_node *endpoint;
+ int i;
+
+ for (i = 0; i < vop2->data->nr_vps; i++) {
+ endpoint = of_graph_get_endpoint_by_regs(node, i,
+ ROCKCHIP_VOP2_EP_RGB0);
+ if (!endpoint)
+ continue;
+
+ of_node_put(endpoint);
+ return i;
+ }
+
+ return -ENOENT;
}
static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
@@ -2621,7 +2656,7 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
/* Allocate vop2 struct and its vop2_win array */
- alloc_size = sizeof(*vop2) + sizeof(*vop2->win) * vop2_data->win_size;
+ alloc_size = struct_size(vop2, win, vop2_data->win_size);
vop2 = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
if (!vop2)
return -ENOMEM;
@@ -2644,6 +2679,8 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
vop2->len = resource_size(res);
vop2->map = devm_regmap_init_mmio(dev, vop2->regs, &vop2_regmap_config);
+ if (IS_ERR(vop2->map))
+ return PTR_ERR(vop2->map);
ret = vop2_win_init(vop2);
if (ret)
@@ -2682,33 +2719,45 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = vop2_create_crtc(vop2);
+ ret = vop2_create_crtcs(vop2);
if (ret)
return ret;
+ ret = vop2_find_rgb_encoder(vop2);
+ if (ret >= 0) {
+ vop2->rgb = rockchip_rgb_init(dev, &vop2->vps[ret].crtc,
+ vop2->drm, ret);
+ if (IS_ERR(vop2->rgb)) {
+ if (PTR_ERR(vop2->rgb) == -EPROBE_DEFER) {
+ ret = PTR_ERR(vop2->rgb);
+ goto err_crtcs;
+ }
+ vop2->rgb = NULL;
+ }
+ }
+
rockchip_drm_dma_init_device(vop2->drm, vop2->dev);
pm_runtime_enable(&pdev->dev);
return 0;
+
+err_crtcs:
+ vop2_destroy_crtcs(vop2);
+
+ return ret;
}
static void vop2_unbind(struct device *dev, struct device *master, void *data)
{
struct vop2 *vop2 = dev_get_drvdata(dev);
- struct drm_device *drm = vop2->drm;
- struct list_head *plane_list = &drm->mode_config.plane_list;
- struct list_head *crtc_list = &drm->mode_config.crtc_list;
- struct drm_crtc *crtc, *tmpc;
- struct drm_plane *plane, *tmpp;
pm_runtime_disable(dev);
- list_for_each_entry_safe(plane, tmpp, plane_list, head)
- drm_plane_cleanup(plane);
+ if (vop2->rgb)
+ rockchip_rgb_fini(vop2->rgb);
- list_for_each_entry_safe(crtc, tmpc, crtc_list, head)
- vop2_destroy_crtc(crtc);
+ vop2_destroy_crtcs(vop2);
}
const struct component_ops vop2_component_ops = {
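
The allocation in vop2_bind() now uses struct_size() from <linux/overflow.h> instead of open-coded sizeof arithmetic; struct_size() computes the size of a struct with a trailing flexible array and saturates on overflow rather than wrapping. A minimal sketch of the pattern, using an illustrative struct that is not part of this patch:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo {
	unsigned int nr_items;
	struct demo_item {
		u32 reg;
	} items[];	/* flexible array member, like vop2->win[] */
};

static struct demo *demo_alloc(unsigned int n)
{
	/* sizeof(*d) + n * sizeof(d->items[0]), overflow-checked */
	struct demo *d = kzalloc(struct_size(d, items, n), GFP_KERNEL);

	if (d)
		d->nr_items = n;
	return d;
}
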
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index c727093a06d6..f1234a151130 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -27,11 +27,6 @@ enum win_dly_mode {
VOP2_DLY_MODE_MAX,
};
-struct vop_rect {
- int width;
- int height;
-};
-
enum vop2_scale_up_mode {
VOP2_SCALE_UP_NRST_NBOR,
VOP2_SCALE_UP_BIL,
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 75eb7cca3d82..c677b71ae516 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -22,13 +22,11 @@
#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
-#define encoder_to_rgb(c) container_of(c, struct rockchip_rgb, encoder)
-
struct rockchip_rgb {
struct device *dev;
struct drm_device *drm_dev;
struct drm_bridge *bridge;
- struct drm_encoder encoder;
+ struct rockchip_encoder encoder;
struct drm_connector connector;
int output_mode;
};
@@ -74,7 +72,8 @@ struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
- struct drm_device *drm_dev)
+ struct drm_device *drm_dev,
+ int video_port)
{
struct rockchip_rgb *rgb;
struct drm_encoder *encoder;
@@ -92,7 +91,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
rgb->dev = dev;
rgb->drm_dev = drm_dev;
- port = of_graph_get_port_by_id(dev->of_node, 0);
+ port = of_graph_get_port_by_id(dev->of_node, video_port);
if (!port)
return ERR_PTR(-EINVAL);
@@ -105,8 +104,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
continue;
child_count++;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
- &panel, &bridge);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, video_port,
+ endpoint_id, &panel, &bridge);
if (!ret) {
of_node_put(endpoint);
break;
@@ -125,7 +124,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
return ERR_PTR(ret);
}
- encoder = &rgb->encoder;
+ encoder = &rgb->encoder.encoder;
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE);
@@ -161,6 +160,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
goto err_free_encoder;
}
+ rgb->encoder.crtc_endpoint_id = endpoint_id;
+
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
@@ -182,6 +183,6 @@ void rockchip_rgb_fini(struct rockchip_rgb *rgb)
{
drm_panel_bridge_remove(rgb->bridge);
drm_connector_cleanup(&rgb->connector);
- drm_encoder_cleanup(&rgb->encoder);
+ drm_encoder_cleanup(&rgb->encoder.encoder);
}
EXPORT_SYMBOL_GPL(rockchip_rgb_fini);
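
Since the encoder is now embedded through struct rockchip_encoder rather than directly as a drm_encoder, recovering the driver structure from an encoder pointer would take two container_of() steps. The removed encoder_to_rgb() macro is not reintroduced by this patch; the sketch below only illustrates what the upcast would look like:

static struct rockchip_rgb *encoder_to_rgb(struct drm_encoder *encoder)
{
	struct rockchip_encoder *rkencoder =
		container_of(encoder, struct rockchip_encoder, encoder);

	return container_of(rkencoder, struct rockchip_rgb, encoder);
}
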
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
index 27b9635124bc..1bd4e20e91eb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -8,12 +8,14 @@
#ifdef CONFIG_ROCKCHIP_RGB
struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
- struct drm_device *drm_dev);
+ struct drm_device *drm_dev,
+ int video_port);
void rockchip_rgb_fini(struct rockchip_rgb *rgb);
#else
static inline struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
- struct drm_device *drm_dev)
+ struct drm_device *drm_dev,
+ int video_port)
{
return NULL;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 014f99e8928e..20ac7811c5eb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -181,6 +181,7 @@ static const struct vop_data rk3036_vop = {
.output = &rk3036_output,
.win = rk3036_vop_win_data,
.win_size = ARRAY_SIZE(rk3036_vop_win_data),
+ .max_output = { 1920, 1080 },
};
static const struct vop_win_phy rk3126_win1_data = {
@@ -213,6 +214,7 @@ static const struct vop_data rk3126_vop = {
.output = &rk3036_output,
.win = rk3126_vop_win_data,
.win_size = ARRAY_SIZE(rk3126_vop_win_data),
+ .max_output = { 1920, 1080 },
};
static const int px30_vop_intrs[] = {
@@ -340,6 +342,7 @@ static const struct vop_data px30_vop_big = {
.output = &px30_output,
.win = px30_vop_big_win_data,
.win_size = ARRAY_SIZE(px30_vop_big_win_data),
+ .max_output = { 1920, 1080 },
};
static const struct vop_win_data px30_vop_lit_win_data[] = {
@@ -356,6 +359,7 @@ static const struct vop_data px30_vop_lit = {
.output = &px30_output,
.win = px30_vop_lit_win_data,
.win_size = ARRAY_SIZE(px30_vop_lit_win_data),
+ .max_output = { 1920, 1080 },
};
static const struct vop_scl_regs rk3066_win_scl = {
@@ -479,6 +483,7 @@ static const struct vop_data rk3066_vop = {
.output = &rk3066_output,
.win = rk3066_vop_win_data,
.win_size = ARRAY_SIZE(rk3066_vop_win_data),
+ .max_output = { 1920, 1080 },
};
static const struct vop_scl_regs rk3188_win_scl = {
@@ -585,6 +590,7 @@ static const struct vop_data rk3188_vop = {
.win = rk3188_vop_win_data,
.win_size = ARRAY_SIZE(rk3188_vop_win_data),
.feature = VOP_FEATURE_INTERNAL_RGB,
+ .max_output = { 2048, 1536 },
};
static const struct vop_scl_extension rk3288_win_full_scl_ext = {
@@ -732,6 +738,12 @@ static const struct vop_data rk3288_vop = {
.win = rk3288_vop_win_data,
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
.lut_size = 1024,
+ /*
+ * This is the maximum resolution for the VOPB; the VOPL can only do
+ * 2560x1600, but we can't distinguish the two as they share the same
+ * compatible string.
+ */
+ .max_output = { 3840, 2160 },
};
static const int rk3368_vop_intrs[] = {
@@ -833,6 +845,7 @@ static const struct vop_data rk3368_vop = {
.misc = &rk3368_misc,
.win = rk3368_vop_win_data,
.win_size = ARRAY_SIZE(rk3368_vop_win_data),
+ .max_output = { 4096, 2160 },
};
static const struct vop_intr rk3366_vop_intr = {
@@ -854,6 +867,7 @@ static const struct vop_data rk3366_vop = {
.misc = &rk3368_misc,
.win = rk3368_vop_win_data,
.win_size = ARRAY_SIZE(rk3368_vop_win_data),
+ .max_output = { 4096, 2160 },
};
static const struct vop_output rk3399_output = {
@@ -984,6 +998,7 @@ static const struct vop_data rk3399_vop_big = {
.win_size = ARRAY_SIZE(rk3399_vop_win_data),
.win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
.lut_size = 1024,
+ .max_output = { 4096, 2160 },
};
static const struct vop_win_data rk3399_vop_lit_win_data[] = {
@@ -1010,6 +1025,7 @@ static const struct vop_data rk3399_vop_lit = {
.win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
.win_yuv2yuv = rk3399_vop_lit_win_yuv2yuv_data,
.lut_size = 256,
+ .max_output = { 2560, 1600 },
};
static const struct vop_win_data rk3228_vop_win_data[] = {
@@ -1029,6 +1045,7 @@ static const struct vop_data rk3228_vop = {
.misc = &rk3368_misc,
.win = rk3228_vop_win_data,
.win_size = ARRAY_SIZE(rk3228_vop_win_data),
+ .max_output = { 4096, 2160 },
};
static const struct vop_modeset rk3328_modeset = {
@@ -1100,6 +1117,7 @@ static const struct vop_data rk3328_vop = {
.misc = &rk3328_misc,
.win = rk3328_vop_win_data,
.win_size = ARRAY_SIZE(rk3328_vop_win_data),
+ .max_output = { 4096, 2160 },
};
static const struct of_device_id vop_driver_dt_match[] = {
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 007f98c48f8d..250c46cd9c34 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -53,6 +53,7 @@
#include <drm/drm_print.h>
#include <drm/drm_gem.h>
+#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
@@ -719,6 +720,34 @@ int drm_sched_job_add_dependency(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_add_dependency);
/**
+ * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
+ * @job: scheduler job to add the dependency to
+ * @file: drm file private pointer used to look up the syncobj
+ * @handle: syncobj handle to lookup
+ * @point: timeline point
+ *
+ * This adds the fence matching the given syncobj and timeline point to @job.
+ *
+ * Returns:
+ * 0 on success, or an error if the syncobj's fence cannot be found or the
+ * dependency array cannot be expanded.
+ */
+int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
+ struct drm_file *file,
+ u32 handle,
+ u32 point)
+{
+ struct dma_fence *fence;
+ int ret;
+
+ ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
+ if (ret)
+ return ret;
+
+ return drm_sched_job_add_dependency(job, fence);
+}
+EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
+
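
A driver's submit path would consume the new helper roughly as follows; the wrapper function here is hypothetical, only drm_sched_job_add_syncobj_dependency() and its signature come from the hunk above:

static int demo_add_in_syncobj(struct drm_sched_job *job,
			       struct drm_file *file,
			       u32 handle, u32 point)
{
	/*
	 * Looks up the syncobj's fence at the given timeline point and
	 * tracks it as a dependency, so the scheduler will not run the
	 * job before that fence signals.
	 */
	return drm_sched_job_add_syncobj_dependency(job, file, handle, point);
}
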
+/**
* drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
* @job: scheduler job to add the dependencies to
* @resv: the dma_resv object to get the fences from
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index f2a880c48485..3c7a5feff8de 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_STI
tristate "DRM Support for STMicroelectronics SoC stiH4xx Series"
- depends on OF && DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
+ depends on OF && DRM && ARCH_STI
select RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index ef6a4e63198f..1b87b5899f9e 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -14,7 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
@@ -199,7 +199,7 @@ static int sti_bind(struct device *dev)
drm_mode_config_reset(ddev);
- drm_fbdev_generic_setup(ddev, 32);
+ drm_fbdev_dma_setup(ddev, 32);
return 0;
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index ded72f879482..fa49cde43bb2 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_STM
tristate "DRM Support for STMicroelectronics SoC Series"
- depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM)
+ depends on DRM && ARCH_STM32
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 50410bd99dfe..422220df7d8c 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -18,7 +18,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -203,7 +203,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
if (ret)
goto err_put;
- drm_fbdev_generic_setup(ddev, 16);
+ drm_fbdev_dma_setup(ddev, 16);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index cc94efbbf2d4..dd283a3a4e36 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -17,7 +17,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
@@ -111,7 +111,7 @@ static int sun4i_drv_bind(struct device *dev)
if (ret)
goto finish_poll;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
dev_set_drvdata(dev, drm);
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 34e80eb6d96e..474bb7a1c4ee 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -67,6 +67,11 @@ struct convert_to_argb2101010_result {
const u32 expected[TEST_BUF_SIZE];
};
+struct convert_to_mono_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
struct convert_xrgb8888_case {
const char *name;
unsigned int pitch;
@@ -82,6 +87,7 @@ struct convert_xrgb8888_case {
struct convert_to_argb8888_result argb8888_result;
struct convert_to_xrgb2101010_result xrgb2101010_result;
struct convert_to_argb2101010_result argb2101010_result;
+ struct convert_to_mono_result mono_result;
};
static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
@@ -131,6 +137,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.dst_pitch = 0,
.expected = { 0xFFF00000 },
},
+ .mono_result = {
+ .dst_pitch = 0,
+ .expected = { 0b0 },
+ },
},
{
.name = "single_pixel_clip_rectangle",
@@ -181,6 +191,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.dst_pitch = 0,
.expected = { 0xFFF00000 },
},
+ .mono_result = {
+ .dst_pitch = 0,
+ .expected = { 0b0 },
+ },
},
{
/* Well known colors: White, black, red, green, blue, magenta,
@@ -293,6 +307,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0xFFFFFC00, 0xC00FFFFF,
},
},
+ .mono_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0b01,
+ 0b10,
+ 0b00,
+ 0b11,
+ },
+ },
},
{
/* Randomly picked colors. Full buffer within the clip area. */
@@ -300,96 +323,104 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.pitch = 3 * 4,
.clip = DRM_RECT_INIT(0, 0, 3, 3),
.xrgb8888 = {
- 0xA10E449C, 0xB1114D05, 0xC1A80303,
- 0xD16C7073, 0xA20E449C, 0xB2114D05,
- 0xC2A80303, 0xD26C7073, 0xA30E449C,
+ 0xA10E449C, 0xB1114D05, 0xC1A8F303,
+ 0xD16CF073, 0xA20E449C, 0xB2114D05,
+ 0xC2A80303, 0xD26CF073, 0xA30E449C,
},
.gray8_result = {
.dst_pitch = 5,
.expected = {
- 0x3C, 0x33, 0x34, 0x00, 0x00,
- 0x6F, 0x3C, 0x33, 0x00, 0x00,
- 0x34, 0x6F, 0x3C, 0x00, 0x00,
+ 0x3C, 0x33, 0xC4, 0x00, 0x00,
+ 0xBB, 0x3C, 0x33, 0x00, 0x00,
+ 0x34, 0xBB, 0x3C, 0x00, 0x00,
},
},
.rgb332_result = {
.dst_pitch = 5,
.expected = {
- 0x0A, 0x08, 0xA0, 0x00, 0x00,
- 0x6D, 0x0A, 0x08, 0x00, 0x00,
- 0xA0, 0x6D, 0x0A, 0x00, 0x00,
+ 0x0A, 0x08, 0xBC, 0x00, 0x00,
+ 0x7D, 0x0A, 0x08, 0x00, 0x00,
+ 0xA0, 0x7D, 0x0A, 0x00, 0x00,
},
},
.rgb565_result = {
.dst_pitch = 10,
.expected = {
- 0x0A33, 0x1260, 0xA800, 0x0000, 0x0000,
- 0x6B8E, 0x0A33, 0x1260, 0x0000, 0x0000,
- 0xA800, 0x6B8E, 0x0A33, 0x0000, 0x0000,
+ 0x0A33, 0x1260, 0xAF80, 0x0000, 0x0000,
+ 0x6F8E, 0x0A33, 0x1260, 0x0000, 0x0000,
+ 0xA800, 0x6F8E, 0x0A33, 0x0000, 0x0000,
},
.expected_swab = {
- 0x330A, 0x6012, 0x00A8, 0x0000, 0x0000,
- 0x8E6B, 0x330A, 0x6012, 0x0000, 0x0000,
- 0x00A8, 0x8E6B, 0x330A, 0x0000, 0x0000,
+ 0x330A, 0x6012, 0x80AF, 0x0000, 0x0000,
+ 0x8E6F, 0x330A, 0x6012, 0x0000, 0x0000,
+ 0x00A8, 0x8E6F, 0x330A, 0x0000, 0x0000,
},
},
.xrgb1555_result = {
.dst_pitch = 10,
.expected = {
- 0x0513, 0x0920, 0x5400, 0x0000, 0x0000,
- 0x35CE, 0x0513, 0x0920, 0x0000, 0x0000,
- 0x5400, 0x35CE, 0x0513, 0x0000, 0x0000,
+ 0x0513, 0x0920, 0x57C0, 0x0000, 0x0000,
+ 0x37CE, 0x0513, 0x0920, 0x0000, 0x0000,
+ 0x5400, 0x37CE, 0x0513, 0x0000, 0x0000,
},
},
.argb1555_result = {
.dst_pitch = 10,
.expected = {
- 0x8513, 0x8920, 0xD400, 0x0000, 0x0000,
- 0xB5CE, 0x8513, 0x8920, 0x0000, 0x0000,
- 0xD400, 0xB5CE, 0x8513, 0x0000, 0x0000,
+ 0x8513, 0x8920, 0xD7C0, 0x0000, 0x0000,
+ 0xB7CE, 0x8513, 0x8920, 0x0000, 0x0000,
+ 0xD400, 0xB7CE, 0x8513, 0x0000, 0x0000,
},
},
.rgba5551_result = {
.dst_pitch = 10,
.expected = {
- 0x0A27, 0x1241, 0xA801, 0x0000, 0x0000,
- 0x6B9D, 0x0A27, 0x1241, 0x0000, 0x0000,
- 0xA801, 0x6B9D, 0x0A27, 0x0000, 0x0000,
+ 0x0A27, 0x1241, 0xAF81, 0x0000, 0x0000,
+ 0x6F9D, 0x0A27, 0x1241, 0x0000, 0x0000,
+ 0xA801, 0x6F9D, 0x0A27, 0x0000, 0x0000,
},
},
.rgb888_result = {
.dst_pitch = 15,
.expected = {
- 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x03, 0x03, 0xA8,
+ 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x03, 0xF3, 0xA8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11,
+ 0x73, 0xF0, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x03, 0xA8, 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E,
+ 0x03, 0x03, 0xA8, 0x73, 0xF0, 0x6C, 0x9C, 0x44, 0x0E,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
.argb8888_result = {
.dst_pitch = 20,
.expected = {
- 0xFF0E449C, 0xFF114D05, 0xFFA80303, 0x00000000, 0x00000000,
- 0xFF6C7073, 0xFF0E449C, 0xFF114D05, 0x00000000, 0x00000000,
- 0xFFA80303, 0xFF6C7073, 0xFF0E449C, 0x00000000, 0x00000000,
+ 0xFF0E449C, 0xFF114D05, 0xFFA8F303, 0x00000000, 0x00000000,
+ 0xFF6CF073, 0xFF0E449C, 0xFF114D05, 0x00000000, 0x00000000,
+ 0xFFA80303, 0xFF6CF073, 0xFF0E449C, 0x00000000, 0x00000000,
},
},
.xrgb2101010_result = {
.dst_pitch = 20,
.expected = {
- 0x03844672, 0x0444D414, 0x2A20300C, 0x00000000, 0x00000000,
- 0x1B1705CD, 0x03844672, 0x0444D414, 0x00000000, 0x00000000,
- 0x2A20300C, 0x1B1705CD, 0x03844672, 0x00000000, 0x00000000,
+ 0x03844672, 0x0444D414, 0x2A2F3C0C, 0x00000000, 0x00000000,
+ 0x1B1F0DCD, 0x03844672, 0x0444D414, 0x00000000, 0x00000000,
+ 0x2A20300C, 0x1B1F0DCD, 0x03844672, 0x00000000, 0x00000000,
},
},
.argb2101010_result = {
.dst_pitch = 20,
.expected = {
- 0xC3844672, 0xC444D414, 0xEA20300C, 0x00000000, 0x00000000,
- 0xDB1705CD, 0xC3844672, 0xC444D414, 0x00000000, 0x00000000,
- 0xEA20300C, 0xDB1705CD, 0xC3844672, 0x00000000, 0x00000000,
+ 0xC3844672, 0xC444D414, 0xEA2F3C0C, 0x00000000, 0x00000000,
+ 0xDB1F0DCD, 0xC3844672, 0xC444D414, 0x00000000, 0x00000000,
+ 0xEA20300C, 0xDB1F0DCD, 0xC3844672, 0x00000000, 0x00000000,
+ },
+ },
+ .mono_result = {
+ .dst_pitch = 2,
+ .expected = {
+ 0b100, 0b000,
+ 0b001, 0b000,
+ 0b010, 0b000,
},
},
},
@@ -414,7 +445,7 @@ static size_t conversion_buf_size(u32 dst_format, unsigned int dst_pitch,
return -EINVAL;
if (!dst_pitch)
- dst_pitch = drm_rect_width(clip) * dst_fi->cpp[0];
+ dst_pitch = drm_format_info_min_pitch(dst_fi, 0, drm_rect_width(clip));
return dst_pitch * drm_rect_height(clip);
}
@@ -597,7 +628,7 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
drm_fb_xrgb8888_to_xrgb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
- KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
@@ -628,7 +659,7 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
drm_fb_xrgb8888_to_argb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
- KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
@@ -659,7 +690,7 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
drm_fb_xrgb8888_to_rgba5551(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
- KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
@@ -724,7 +755,7 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
drm_fb_xrgb8888_to_argb8888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
- KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
@@ -786,7 +817,37 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
drm_fb_xrgb8888_to_argb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
- KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+}
+
+static void drm_test_fb_xrgb8888_to_mono(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_mono_result *result = &params->mono_result;
+ size_t dst_size;
+ u8 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_C1, result->dst_pitch, &params->clip);
+
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_mono(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static struct kunit_case drm_format_helper_test_cases[] = {
@@ -800,6 +861,7 @@ static struct kunit_case drm_format_helper_test_cases[] = {
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_mono, convert_xrgb8888_gen_params),
{}
};
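
The assertions in this file moved from KUNIT_EXPECT_EQ(test, memcmp(...), 0) to KUNIT_EXPECT_MEMEQ(), which dumps both buffers on mismatch instead of reporting a bare integer comparison. A minimal sketch of the idiom (the test body is illustrative):

static void demo_memeq(struct kunit *test)
{
	static const u8 expected[] = { 0x01, 0x02, 0x03 };
	u8 buf[] = { 0x01, 0x02, 0x03 };

	/* On failure, KUnit logs a hex dump of both buffers. */
	KUNIT_EXPECT_MEMEQ(test, buf, expected, sizeof(expected));
}
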
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index e98b4150f556..4df47071dc88 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -19,14 +19,8 @@ static int fake_probe(struct platform_device *pdev)
return 0;
}
-static int fake_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static struct platform_driver fake_platform_driver = {
.probe = fake_probe,
- .remove = fake_remove,
.driver = {
.name = KUNIT_DEVICE_NAME,
},
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 165365b515e1..dca077411f77 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -1985,9 +1985,9 @@ dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
(y * fb->pitches[1] / fb->format->vsub);
}
-int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
- const struct drm_plane_state *state,
- u32 hw_videoport)
+void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport)
{
bool lite = dispc->feat->vid_lite[hw_plane];
u32 fourcc = state->fb->format->format;
@@ -2066,15 +2066,11 @@ int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
else
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
28, 28);
-
- return 0;
}
-int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
+void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
{
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
-
- return 0;
}
static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index e49432f0abf5..946ed769caaf 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -123,10 +123,10 @@ int dispc_runtime_resume(struct dispc_device *dispc);
int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport);
-int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
- const struct drm_plane_state *state,
- u32 hw_videoport);
-int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
+void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport);
+void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len);
int dispc_init(struct tidss_device *tidss);
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 2dac8727d2f4..3f5f27fb6ebc 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -13,7 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
@@ -179,7 +179,7 @@ static int tidss_probe(struct platform_device *pdev)
goto err_irq_uninstall;
}
- drm_fbdev_generic_setup(ddev, 32);
+ drm_fbdev_dma_setup(ddev, 32);
dev_dbg(dev, "%s done\n", __func__);
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index fe2c41f0cd4f..6bdd6e4a955a 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -113,7 +113,6 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
u32 hw_videoport;
- int ret;
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -124,15 +123,17 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
hw_videoport = to_tidss_crtc(new_state->crtc)->hw_videoport;
- ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id,
- new_state, hw_videoport);
+ dispc_plane_setup(tidss->dispc, tplane->hw_plane_id, new_state, hw_videoport);
+}
- if (ret) {
- dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n",
- __func__, tplane->hw_plane_id);
- dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
- return;
- }
+static void tidss_plane_atomic_enable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = to_tidss(ddev);
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
}
@@ -160,6 +161,7 @@ static void drm_plane_destroy(struct drm_plane *plane)
static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
.atomic_check = tidss_plane_atomic_check,
.atomic_update = tidss_plane_atomic_update,
+ .atomic_enable = tidss_plane_atomic_enable,
.atomic_disable = tidss_plane_atomic_disable,
};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 4ca426007dc8..fe56beea3e93 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -384,7 +384,7 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
goto init_failed;
priv->is_registered = true;
- drm_fbdev_generic_setup(ddev, bpp);
+ drm_fbdev_dma_setup(ddev, bpp);
return 0;
init_failed:
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index 611bbee15071..e5b10e41554a 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -12,7 +12,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
@@ -394,7 +394,7 @@ static int arcpgu_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_generic_setup(&arcpgu->drm, 16);
+ drm_fbdev_dma_setup(&arcpgu->drm, 16);
return 0;
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 024346054c70..d254679a136e 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -545,7 +545,6 @@ static int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
- bochs->dev->mode_config.prefer_shadow_fbdev = 1;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index accfa52e78c5..594bc472862f 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -24,6 +24,7 @@
#include <video/vga.h>
#include <drm/drm_aperture.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
@@ -43,7 +44,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#define DRIVER_NAME "cirrus"
#define DRIVER_DESC "qemu cirrus vga"
@@ -56,16 +56,34 @@
struct cirrus_device {
struct drm_device dev;
- struct drm_simple_display_pipe pipe;
- struct drm_connector conn;
- unsigned int cpp;
- unsigned int pitch;
+
+ /* modesetting pipeline */
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ /* HW resources */
void __iomem *vram;
void __iomem *mmio;
};
#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
+struct cirrus_primary_plane_state {
+ struct drm_shadow_plane_state base;
+
+ /* HW scanout buffer */
+ const struct drm_format_info *format;
+ unsigned int pitch;
+};
+
+static inline struct cirrus_primary_plane_state *
+to_cirrus_primary_plane_state(struct drm_plane_state *plane_state)
+{
+ return container_of(plane_state, struct cirrus_primary_plane_state, base.base);
+}
+
/* ------------------------------------------------------------------ */
/*
* The meat of this driver. The core passes us a mode and we have to program
@@ -126,46 +144,42 @@ static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
}
-static int cirrus_convert_to(struct drm_framebuffer *fb)
+static const struct drm_format_info *cirrus_convert_to(struct drm_framebuffer *fb)
{
- if (fb->format->cpp[0] == 4 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
+ if (fb->format->format == DRM_FORMAT_XRGB8888 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
if (fb->width * 3 <= CIRRUS_MAX_PITCH)
/* convert from XR24 to RG24 */
- return 3;
+ return drm_format_info(DRM_FORMAT_RGB888);
else
/* convert from XR24 to RG16 */
- return 2;
+ return drm_format_info(DRM_FORMAT_RGB565);
}
- return 0;
+ return NULL;
}
-static int cirrus_cpp(struct drm_framebuffer *fb)
+static const struct drm_format_info *cirrus_format(struct drm_framebuffer *fb)
{
- int convert_cpp = cirrus_convert_to(fb);
+ const struct drm_format_info *format = cirrus_convert_to(fb);
- if (convert_cpp)
- return convert_cpp;
- return fb->format->cpp[0];
+ if (format)
+ return format;
+ return fb->format;
}
static int cirrus_pitch(struct drm_framebuffer *fb)
{
- int convert_cpp = cirrus_convert_to(fb);
+ const struct drm_format_info *format = cirrus_convert_to(fb);
- if (convert_cpp)
- return convert_cpp * fb->width;
+ if (format)
+ return drm_format_info_min_pitch(format, 0, fb->width);
return fb->pitches[0];
}
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
- int idx;
u32 addr;
u8 tmp;
- if (!drm_dev_enter(&cirrus->dev, &idx))
- return;
-
addr = offset >> 2;
wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
@@ -180,21 +194,14 @@ static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
tmp &= 0x7f;
tmp |= (addr >> 12) & 0x80;
wreg_crt(cirrus, 0x1d, tmp);
-
- drm_dev_exit(idx);
}
-static int cirrus_mode_set(struct cirrus_device *cirrus,
- struct drm_display_mode *mode,
- struct drm_framebuffer *fb)
+static void cirrus_mode_set(struct cirrus_device *cirrus,
+ struct drm_display_mode *mode)
{
int hsyncstart, hsyncend, htotal, hdispend;
int vtotal, vdispend;
- int tmp, idx;
- int sr07 = 0, hdr = 0;
-
- if (!drm_dev_enter(&cirrus->dev, &idx))
- return -1;
+ int tmp;
htotal = mode->htotal / 8;
hsyncend = mode->hsync_end / 8;
@@ -258,46 +265,39 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
/* Disable Hercules/CGA compatibility */
wreg_crt(cirrus, VGA_CRTC_MODE, 0x03);
+}
+
+static void cirrus_format_set(struct cirrus_device *cirrus,
+ const struct drm_format_info *format)
+{
+ u8 sr07, hdr;
sr07 = rreg_seq(cirrus, 0x07);
sr07 &= 0xe0;
- hdr = 0;
- cirrus->cpp = cirrus_cpp(fb);
- switch (cirrus->cpp * 8) {
- case 8:
+ switch (format->format) {
+ case DRM_FORMAT_C8:
sr07 |= 0x11;
+ hdr = 0x00;
break;
- case 16:
+ case DRM_FORMAT_RGB565:
sr07 |= 0x17;
hdr = 0xc1;
break;
- case 24:
+ case DRM_FORMAT_RGB888:
sr07 |= 0x15;
hdr = 0xc5;
break;
- case 32:
+ case DRM_FORMAT_XRGB8888:
sr07 |= 0x19;
hdr = 0xc5;
break;
default:
- drm_dev_exit(idx);
- return -1;
+ return;
}
wreg_seq(cirrus, 0x7, sr07);
- /* Program the pitch */
- cirrus->pitch = cirrus_pitch(fb);
- tmp = cirrus->pitch / 8;
- wreg_crt(cirrus, VGA_CRTC_OFFSET, tmp);
-
- /* Enable extended blanking and pitch bits, and enable full memory */
- tmp = 0x22;
- tmp |= (cirrus->pitch >> 7) & 0x10;
- tmp |= (cirrus->pitch >> 6) & 0x40;
- wreg_crt(cirrus, 0x1b, tmp);
-
/* Enable high-colour modes */
wreg_gfx(cirrus, VGA_GFX_MODE, 0x40);
@@ -305,208 +305,323 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
wreg_gfx(cirrus, VGA_GFX_MISC, 0x01);
wreg_hdr(cirrus, hdr);
+}
- cirrus_set_start_address(cirrus, 0);
+static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch)
+{
+ u8 cr13, cr1b;
- /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
- outb(0x20, 0x3c0);
+ /* Program the pitch */
+ cr13 = pitch / 8;
+ wreg_crt(cirrus, VGA_CRTC_OFFSET, cr13);
- drm_dev_exit(idx);
- return 0;
+ /* Enable extended blanking and pitch bits, and enable full memory */
+ cr1b = 0x22;
+ cr1b |= (pitch >> 7) & 0x10;
+ cr1b |= (pitch >> 6) & 0x40;
+ wreg_crt(cirrus, 0x1b, cr1b);
+
+ cirrus_set_start_address(cirrus, 0);
}
-static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
- const struct iosys_map *vmap,
- struct drm_rect *rect)
-{
- struct cirrus_device *cirrus = to_cirrus(fb->dev);
- struct iosys_map dst;
- int idx;
+/* ------------------------------------------------------------------ */
+/* cirrus display pipe */
- if (!drm_dev_enter(&cirrus->dev, &idx))
- return -ENODEV;
+static const uint32_t cirrus_primary_plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+};
- iosys_map_set_vaddr_iomem(&dst, cirrus->vram);
+static const uint64_t cirrus_primary_plane_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct cirrus_primary_plane_state *new_primary_plane_state =
+ to_cirrus_primary_plane_state(new_plane_state);
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
+ unsigned int pitch;
- if (cirrus->cpp == fb->format->cpp[0]) {
- iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, rect));
- drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect);
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
- } else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2) {
- iosys_map_incr(&dst, drm_fb_clip_offset(cirrus->pitch, fb->format, rect));
- drm_fb_xrgb8888_to_rgb565(&dst, &cirrus->pitch, vmap, fb, rect, false);
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
- } else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3) {
- iosys_map_incr(&dst, drm_fb_clip_offset(cirrus->pitch, fb->format, rect));
- drm_fb_xrgb8888_to_rgb888(&dst, &cirrus->pitch, vmap, fb, rect);
+ pitch = cirrus_pitch(fb);
- } else {
- WARN_ON_ONCE("cpp mismatch");
- }
+ /* validate size constraints */
+ if (pitch > CIRRUS_MAX_PITCH)
+ return -EINVAL;
+ else if (pitch * fb->height > CIRRUS_VRAM_SIZE)
+ return -EINVAL;
- drm_dev_exit(idx);
+ new_primary_plane_state->format = cirrus_format(fb);
+ new_primary_plane_state->pitch = pitch;
return 0;
}
-static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb,
- const struct iosys_map *map)
+static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct drm_rect fullscreen = {
- .x1 = 0,
- .x2 = fb->width,
- .y1 = 0,
- .y2 = fb->height,
- };
- return cirrus_fb_blit_rect(fb, map, &fullscreen);
-}
+ struct cirrus_device *cirrus = to_cirrus(plane->dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct cirrus_primary_plane_state *primary_plane_state =
+ to_cirrus_primary_plane_state(plane_state);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ const struct drm_format_info *format = primary_plane_state->format;
+ unsigned int pitch = primary_plane_state->pitch;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct cirrus_primary_plane_state *old_primary_plane_state =
+ to_cirrus_primary_plane_state(old_plane_state);
+ struct iosys_map vaddr = IOSYS_MAP_INIT_VADDR_IOMEM(cirrus->vram);
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int idx;
-static int cirrus_check_size(int width, int height,
- struct drm_framebuffer *fb)
-{
- int pitch = width * 2;
+ if (!fb)
+ return;
- if (fb)
- pitch = cirrus_pitch(fb);
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ return;
- if (pitch > CIRRUS_MAX_PITCH)
- return -EINVAL;
- if (pitch * height > CIRRUS_VRAM_SIZE)
- return -EINVAL;
- return 0;
-}
+ if (old_primary_plane_state->format != format)
+ cirrus_format_set(cirrus, format);
+ if (old_primary_plane_state->pitch != pitch)
+ cirrus_pitch_set(cirrus, pitch);
-/* ------------------------------------------------------------------ */
-/* cirrus connector */
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ unsigned int offset = drm_fb_clip_offset(pitch, format, &damage);
+ struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset);
-static int cirrus_conn_get_modes(struct drm_connector *conn)
-{
- int count;
+ drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb, &damage);
+ }
- count = drm_add_modes_noedid(conn,
- conn->dev->mode_config.max_width,
- conn->dev->mode_config.max_height);
- drm_set_preferred_mode(conn, 1024, 768);
- return count;
+ drm_dev_exit(idx);
}
-static const struct drm_connector_helper_funcs cirrus_conn_helper_funcs = {
- .get_modes = cirrus_conn_get_modes,
-};
-
-static const struct drm_connector_funcs cirrus_conn_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+static const struct drm_plane_helper_funcs cirrus_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = cirrus_primary_plane_helper_atomic_check,
+ .atomic_update = cirrus_primary_plane_helper_atomic_update,
};
-static int cirrus_conn_init(struct cirrus_device *cirrus)
+static struct drm_plane_state *
+cirrus_primary_plane_atomic_duplicate_state(struct drm_plane *plane)
{
- drm_connector_helper_add(&cirrus->conn, &cirrus_conn_helper_funcs);
- return drm_connector_init(&cirrus->dev, &cirrus->conn,
- &cirrus_conn_funcs, DRM_MODE_CONNECTOR_VGA);
+ struct drm_plane_state *plane_state = plane->state;
+ struct cirrus_primary_plane_state *primary_plane_state =
+ to_cirrus_primary_plane_state(plane_state);
+ struct cirrus_primary_plane_state *new_primary_plane_state;
+ struct drm_shadow_plane_state *new_shadow_plane_state;
-}
+ if (!plane_state)
+ return NULL;
-/* ------------------------------------------------------------------ */
-/* cirrus (simple) display pipe */
+ new_primary_plane_state = kzalloc(sizeof(*new_primary_plane_state), GFP_KERNEL);
+ if (!new_primary_plane_state)
+ return NULL;
+ new_shadow_plane_state = &new_primary_plane_state->base;
+
+ __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
+ new_primary_plane_state->format = primary_plane_state->format;
+ new_primary_plane_state->pitch = primary_plane_state->pitch;
+
+ return &new_shadow_plane_state->base;
+}
-static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
- const struct drm_display_mode *mode)
+static void cirrus_primary_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
{
- if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
- return MODE_BAD;
- return MODE_OK;
+ struct cirrus_primary_plane_state *primary_plane_state =
+ to_cirrus_primary_plane_state(plane_state);
+
+ __drm_gem_destroy_shadow_plane_state(&primary_plane_state->base);
+ kfree(primary_plane_state);
}
-static int cirrus_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+static void cirrus_reset_primary_plane(struct drm_plane *plane)
{
- struct drm_framebuffer *fb = plane_state->fb;
+ struct cirrus_primary_plane_state *primary_plane_state;
- if (!fb)
- return 0;
- return cirrus_check_size(fb->width, fb->height, fb);
+ if (plane->state) {
+ cirrus_primary_plane_atomic_destroy_state(plane, plane->state);
+ plane->state = NULL; /* clear the dangling pointer to the freed state */
+ }
+
+ primary_plane_state = kzalloc(sizeof(*primary_plane_state), GFP_KERNEL);
+ if (!primary_plane_state)
+ return;
+ __drm_gem_reset_shadow_plane(plane, &primary_plane_state->base);
}
-static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static const struct drm_plane_funcs cirrus_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = cirrus_reset_primary_plane,
+ .atomic_duplicate_state = cirrus_primary_plane_atomic_duplicate_state,
+ .atomic_destroy_state = cirrus_primary_plane_atomic_destroy_state,
+};
+
+static int cirrus_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
- struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ int ret;
+
+ if (!crtc_state->enable)
+ return 0;
+
+ ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state);
+ if (ret)
+ return ret;
- cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
- cirrus_fb_blit_fullscreen(plane_state->fb, &shadow_plane_state->data[0]);
+ return 0;
}
-static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+static void cirrus_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_crtc *crtc = &pipe->crtc;
- struct drm_rect rect;
+ struct cirrus_device *cirrus = to_cirrus(crtc->dev);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ int idx;
+
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ return;
- if (state->fb && cirrus->cpp != cirrus_cpp(state->fb))
- cirrus_mode_set(cirrus, &crtc->mode, state->fb);
+ cirrus_mode_set(cirrus, &crtc_state->mode);
- if (state->fb && drm_atomic_helper_damage_merged(old_state, state, &rect))
- cirrus_fb_blit_rect(state->fb, &shadow_plane_state->data[0], &rect);
+ /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
+ outb(VGA_AR_ENABLE_DISPLAY, VGA_ATT_W);
+
+ drm_dev_exit(idx);
}
-static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
- .mode_valid = cirrus_pipe_mode_valid,
- .check = cirrus_pipe_check,
- .enable = cirrus_pipe_enable,
- .update = cirrus_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static const struct drm_crtc_helper_funcs cirrus_crtc_helper_funcs = {
+ .atomic_check = cirrus_crtc_helper_atomic_check,
+ .atomic_enable = cirrus_crtc_helper_atomic_enable,
};
-static const uint32_t cirrus_formats[] = {
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888,
+static const struct drm_crtc_funcs cirrus_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
-static const uint64_t cirrus_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const struct drm_encoder_funcs cirrus_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int cirrus_connector_helper_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector,
+ connector->dev->mode_config.max_width,
+ connector->dev->mode_config.max_height);
+ drm_set_preferred_mode(connector, 1024, 768);
+ return count;
+}
+
+static const struct drm_connector_helper_funcs cirrus_connector_helper_funcs = {
+ .get_modes = cirrus_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs cirrus_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int cirrus_pipe_init(struct cirrus_device *cirrus)
{
- return drm_simple_display_pipe_init(&cirrus->dev,
- &cirrus->pipe,
- &cirrus_pipe_funcs,
- cirrus_formats,
- ARRAY_SIZE(cirrus_formats),
- cirrus_modifiers,
- &cirrus->conn);
+ struct drm_device *dev = &cirrus->dev;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ primary_plane = &cirrus->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &cirrus_primary_plane_funcs,
+ cirrus_primary_plane_formats,
+ ARRAY_SIZE(cirrus_primary_plane_formats),
+ cirrus_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+ drm_plane_helper_add(primary_plane, &cirrus_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ crtc = &cirrus->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &cirrus_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(crtc, &cirrus_crtc_helper_funcs);
+
+ encoder = &cirrus->encoder;
+ ret = drm_encoder_init(dev, encoder, &cirrus_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ connector = &cirrus->connector;
+ ret = drm_connector_init(dev, connector, &cirrus_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ if (ret)
+ return ret;
+ drm_connector_helper_add(connector, &cirrus_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
}
/* ------------------------------------------------------------------ */
/* cirrus framebuffers & mode config */
-static struct drm_framebuffer*
-cirrus_fb_create(struct drm_device *dev, struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+static enum drm_mode_status cirrus_mode_config_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
{
- if (mode_cmd->pixel_format != DRM_FORMAT_RGB565 &&
- mode_cmd->pixel_format != DRM_FORMAT_RGB888 &&
- mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
- return ERR_PTR(-EINVAL);
- if (cirrus_check_size(mode_cmd->width, mode_cmd->height, NULL) < 0)
- return ERR_PTR(-EINVAL);
- return drm_gem_fb_create_with_dirty(dev, file_priv, mode_cmd);
+ const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888);
+ uint64_t pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+
+ if (pitch * mode->vdisplay > CIRRUS_VRAM_SIZE)
+ return MODE_MEM;
+
+ return MODE_OK;
}
static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
- .fb_create = cirrus_fb_create,
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = cirrus_mode_config_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -589,10 +704,6 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = cirrus_conn_init(cirrus);
- if (ret < 0)
- return ret;
-
ret = cirrus_pipe_init(cirrus);
if (ret < 0)
return ret;
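
The cirrus conversion logic now derives pitches from drm_format_info_min_pitch() rather than cpp * width. As a rough worked example (the helper function is illustrative, and the values assume an 800-pixel-wide framebuffer):

static void demo_pitch_math(void)
{
	const struct drm_format_info *xrgb = drm_format_info(DRM_FORMAT_XRGB8888);
	const struct drm_format_info *rgb24 = drm_format_info(DRM_FORMAT_RGB888);

	/* XRGB8888 needs 4 * 800 = 3200 bytes per scanline... */
	u64 pitch32 = drm_format_info_min_pitch(xrgb, 0, 800);

	/* ...while the RGB888 fallback fits the same width in 2400. */
	u64 pitch24 = drm_format_info_min_pitch(rgb24, 0, 800);
}

This mirrors the comparison cirrus_convert_to() makes against CIRRUS_MAX_PITCH when deciding whether to emulate XR24 with RG24 or RG16.
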
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 63881a3754f8..c38d85848af8 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -606,16 +606,12 @@ static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
*/
static struct drm_display_mode simpledrm_mode(unsigned int width,
- unsigned int height)
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm)
{
- /*
- * Assume a monitor resolution of 96 dpi to
- * get a somewhat reasonable screen size.
- */
const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height,
- DRM_MODE_RES_MM(width, 96ul),
- DRM_MODE_RES_MM(height, 96ul))
+ DRM_MODE_INIT(60, width, height, width_mm, height_mm)
};
return mode;
@@ -629,6 +625,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
struct simpledrm_device *sdev;
struct drm_device *dev;
int width, height, stride;
+ int width_mm = 0, height_mm = 0;
+ struct device_node *panel_node;
const struct drm_format_info *format;
struct resource *res, *mem = NULL;
struct drm_plane *primary_plane;
@@ -685,6 +683,12 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
mem = simplefb_get_memory_of(dev, of_node);
if (IS_ERR(mem))
return ERR_CAST(mem);
+ panel_node = of_parse_phandle(of_node, "panel", 0);
+ if (panel_node) {
+ simplefb_read_u32_of(dev, panel_node, "width-mm", &width_mm);
+ simplefb_read_u32_of(dev, panel_node, "height-mm", &height_mm);
+ of_node_put(panel_node);
+ }
} else {
drm_err(dev, "no simplefb configuration found\n");
return ERR_PTR(-ENODEV);
@@ -695,7 +699,16 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
return ERR_PTR(-EINVAL);
}
- sdev->mode = simpledrm_mode(width, height);
+ /*
+ * Assume a monitor resolution of 96 dpi if physical dimensions
+ * are not specified to get a somewhat reasonable screen size.
+ */
+ if (!width_mm)
+ width_mm = DRM_MODE_RES_MM(width, 96ul);
+ if (!height_mm)
+ height_mm = DRM_MODE_RES_MM(height, 96ul);
+
+ sdev->mode = simpledrm_mode(width, height, width_mm, height_mm);
sdev->format = format;
sdev->pitch = stride;
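
For the 96 dpi fallback above: DRM_MODE_RES_MM(res, dpi) converts pixels to millimetres as res * 25.4 / dpi (assuming the usual definition in <drm/drm_modes.h>), so a 1024x768 framebuffer with no panel dimensions reports roughly:

	width_mm  = DRM_MODE_RES_MM(1024, 96ul);	/* 1024 * 254 / 960 = 270 mm */
	height_mm = DRM_MODE_RES_MM(768, 96ul);		/*  768 * 254 / 960 = 203 mm */
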
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 326a3d13a829..d056d28f8758 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
* ttm_bo_set_bulk_move - update the BO's bulk move object
*
* @bo: The buffer object.
+ * @bulk: The bulk move to set for @bo, or NULL to clear it.
*
* Update the BO's bulk move object, making sure that resources are added or
* removed as well. A bulk move allows many resources to be moved on the LRU at once,
@@ -120,8 +121,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
bool old_use_tt, new_use_tt;
int ret;
- old_use_tt = bo->resource &&
- ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
+ old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;
ttm_bo_unmap_virtual(bo);
@@ -465,7 +465,8 @@ bounce:
if (ret == -EMULTIHOP) {
ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
if (ret) {
- pr_err("Buffer eviction failed\n");
+ if (ret != -ERESTARTSYS && ret != -EINTR)
+ pr_err("Buffer eviction failed\n");
ttm_resource_free(bo, &evict_mem);
goto out;
}
@@ -748,7 +749,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
*
* @bo: Pointer to a struct ttm_buffer_object. the data of which
* we want to allocate space for.
- * @proposed_placement: Proposed new placement for the buffer object.
+ * @placement: Proposed new placement for the buffer object.
* @mem: A struct ttm_resource.
* @ctx: if and how to sleep, lock buffers and alloc memory
*
@@ -894,14 +895,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
if (!placement->num_placement && !placement->num_busy_placement)
return ttm_bo_pipeline_gutting(bo);
- /*
- * Check whether we need to move buffer.
- */
- if (!bo->resource || !ttm_resource_compat(bo->resource, placement)) {
- ret = ttm_bo_move_buffer(bo, placement, ctx);
- if (ret)
- return ret;
- }
+ /* Check whether we need to move buffer. */
+ if (bo->resource && ttm_resource_compat(bo->resource, placement))
+ return 0;
+
+ /* Moving of pinned BOs is forbidden */
+ if (bo->pin_count)
+ return -EINVAL;
+
+ ret = ttm_bo_move_buffer(bo, placement, ctx);
+ if (ret)
+ return ret;
+
/*
* We might need to add a TTM.
*/
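
The new pin_count check makes the contract explicit: a pinned BO may still pass validation if its current resource is already compatible, but any placement change now fails instead of silently moving pinned memory. A hedged fragment of the resulting caller-side behaviour:

	/* Sketch: callers must drop their pin before requesting a move. */
	ttm_bo_pin(bo);
	ret = ttm_bo_validate(bo, placement, &ctx);	/* -EINVAL if a move is needed */
	ttm_bo_unpin(bo);
	ret = ttm_bo_validate(bo, placement, &ctx);	/* may now evict or move the BO */
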
@@ -953,7 +958,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
- static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
int ret;
kref_init(&bo->kref);
@@ -970,12 +974,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
bo->base.resv = &bo->base._resv;
atomic_inc(&ttm_glob.bo_count);
- ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
- if (unlikely(ret)) {
- ttm_bo_put(bo);
- return ret;
- }
-
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7635d7d6b13b..fd9fd3d15101 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -157,8 +157,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool clear;
int ret = 0;
- if (!src_mem)
- return 0;
+ if (WARN_ON(!src_mem))
+ return -EINVAL;
src_man = ttm_manager_type(bdev, src_mem->mem_type);
if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
@@ -704,30 +704,23 @@ EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
*/
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
- static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
struct ttm_buffer_object *ghost;
- struct ttm_resource *sys_res;
struct ttm_tt *ttm;
int ret;
- ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
- if (ret)
- return ret;
-
/* If already idle, no need for ghost object dance. */
if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
if (!bo->ttm) {
/* See comment below about clearing. */
ret = ttm_tt_create(bo, true);
if (ret)
- goto error_free_sys_mem;
+ return ret;
} else {
ttm_tt_unpopulate(bo->bdev, bo->ttm);
if (bo->type == ttm_bo_type_device)
ttm_tt_mark_for_clear(bo->ttm);
}
ttm_resource_free(bo, &bo->resource);
- ttm_bo_assign_mem(bo, sys_res);
return 0;
}
@@ -744,7 +737,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
ret = ttm_tt_create(bo, true);
swap(bo->ttm, ttm);
if (ret)
- goto error_free_sys_mem;
+ return ret;
ret = ttm_buffer_object_transfer(bo, &ghost);
if (ret)
@@ -760,13 +753,9 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
bo->ttm = ttm;
- ttm_bo_assign_mem(bo, sys_res);
return 0;
error_destroy_tt:
ttm_tt_destroy(bo->bdev, ttm);
-
-error_free_sys_mem:
- ttm_resource_free(bo, &sys_res);
return ret;
}
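With the dummy system-resource allocations removed here and in ttm_bo_init_reserved() above, a freshly created or gutted BO may have bo->resource == NULL until its first validate. A minimal guard mirroring the ttm_bo_handle_move_mem() change, on the assumption that a missing resource behaves like a system placement:

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>

static bool bo_uses_tt(struct ttm_buffer_object *bo)
{
	/* No backing store yet is equivalent to system memory. */
	return !bo->resource ||
	       ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt;
}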
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index c7a1862f322a..cd73631b6106 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -137,7 +137,6 @@ int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
mutex_unlock(&ttm_global_mutex);
return ret;
}
-EXPORT_SYMBOL(ttm_global_swapout);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index b8a826a24fb2..7333f7a87a2f 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -361,7 +361,6 @@ bool ttm_resource_compat(struct ttm_resource *res,
return false;
}
-EXPORT_SYMBOL(ttm_resource_compat);
void ttm_resource_set_bo(struct ttm_resource *res,
struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 0d05c386d303..abd557332b28 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -40,7 +40,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -227,7 +227,7 @@ static int tve200_probe(struct platform_device *pdev)
* Passing in 16 here will make the RGB565 mode the default
* Passing in 32 will use XRGB8888 mode
*/
- drm_fbdev_generic_setup(drm, 16);
+ drm_fbdev_dma_setup(drm, 16);
return 0;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 5da1806f3969..2e94ce788c71 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -397,20 +397,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
}
static int
-v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job,
- u32 in_sync, u32 point)
-{
- struct dma_fence *in_fence = NULL;
- int ret;
-
- ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence);
- if (ret == -EINVAL)
- return ret;
-
- return drm_sched_job_add_dependency(&job->base, in_fence);
-}
-
-static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
void **container, size_t size, void (*free)(struct kref *ref),
u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
@@ -447,14 +433,18 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
DRM_DEBUG("Failed to copy wait dep handle.\n");
goto fail_deps;
}
- ret = v3d_job_add_deps(file_priv, job, in.handle, 0);
- if (ret)
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
+
+ // TODO: Investigate why this was filtered out for the IOCTL.
+ if (ret && ret != -ENOENT)
goto fail_deps;
}
}
} else {
- ret = v3d_job_add_deps(file_priv, job, in_sync, 0);
- if (ret)
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
+
+ // TODO: Investigate why this was filtered out for the IOCTL.
+ if (ret && ret != -ENOENT)
goto fail_deps;
}
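drm_sched_job_add_syncobj_dependency() bundles the syncobj lookup and the dependency add that v3d previously open-coded in v3d_job_add_deps(). A hedged sketch of the call in a generic submit path; the wrapper name is hypothetical and the -ENOENT filtering mirrors the open TODO above:

#include <drm/gpu_scheduler.h>

static int job_add_in_syncobj(struct drm_sched_job *job,
			      struct drm_file *file_priv,
			      u32 handle, u32 point)
{
	int ret = drm_sched_job_add_syncobj_dependency(job, file_priv,
						       handle, point);

	/* Treat "no fence attached" as a non-fatal condition. */
	return ret == -ENOENT ? 0 : ret;
}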
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 0ccaee57fe9a..c8bf954042e0 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -33,7 +33,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_vblank.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
@@ -387,7 +387,7 @@ static int vc4_drm_bind(struct device *dev)
if (ret < 0)
goto unbind_all;
- drm_fbdev_generic_setup(drm, 16);
+ drm_fbdev_dma_setup(drm, 16);
return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 95069bb16821..8768566c610b 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -690,7 +690,7 @@ struct vc4_exec_info {
/* This is the array of BOs that were looked up at the start of exec.
* Command validation will use indices into this array.
*/
- struct drm_gem_dma_object **bo;
+ struct drm_gem_object **bo;
uint32_t bo_count;
/* List of BOs that are being written by the RCL. Other than
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 628d40ff3aa1..03648f954985 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -199,7 +199,7 @@ vc4_save_hang_state(struct drm_device *dev)
continue;
for (j = 0; j < exec[i]->bo_count; j++) {
- bo = to_vc4_bo(&exec[i]->bo[j]->base);
+ bo = to_vc4_bo(exec[i]->bo[j]);
/* Retain BOs just in case they were marked purgeable.
* This prevents the BO from being purged before
@@ -207,8 +207,8 @@ vc4_save_hang_state(struct drm_device *dev)
*/
WARN_ON(!refcount_read(&bo->usecnt));
refcount_inc(&bo->usecnt);
- drm_gem_object_get(&exec[i]->bo[j]->base);
- kernel_state->bo[k++] = &exec[i]->bo[j]->base;
+ drm_gem_object_get(exec[i]->bo[j]);
+ kernel_state->bo[k++] = exec[i]->bo[j];
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -558,7 +558,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
unsigned i;
for (i = 0; i < exec->bo_count; i++) {
- bo = to_vc4_bo(&exec->bo[i]->base);
+ bo = to_vc4_bo(exec->bo[i]);
bo->seqno = seqno;
dma_resv_add_fence(bo->base.base.resv, exec->fence,
@@ -585,11 +585,8 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
{
int i;
- for (i = 0; i < exec->bo_count; i++) {
- struct drm_gem_object *bo = &exec->bo[i]->base;
-
- dma_resv_unlock(bo->resv);
- }
+ for (i = 0; i < exec->bo_count; i++)
+ dma_resv_unlock(exec->bo[i]->resv);
ww_acquire_fini(acquire_ctx);
}
@@ -614,7 +611,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
retry:
if (contended_lock != -1) {
- bo = &exec->bo[contended_lock]->base;
+ bo = exec->bo[contended_lock];
ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
if (ret) {
ww_acquire_done(acquire_ctx);
@@ -626,19 +623,19 @@ retry:
if (i == contended_lock)
continue;
- bo = &exec->bo[i]->base;
+ bo = exec->bo[i];
ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++) {
- bo = &exec->bo[j]->base;
+ bo = exec->bo[j];
dma_resv_unlock(bo->resv);
}
if (contended_lock != -1 && contended_lock >= i) {
- bo = &exec->bo[contended_lock]->base;
+ bo = exec->bo[contended_lock];
dma_resv_unlock(bo->resv);
}
@@ -659,7 +656,7 @@ retry:
* before we commit the CL to the hardware.
*/
for (i = 0; i < exec->bo_count; i++) {
- bo = &exec->bo[i]->base;
+ bo = exec->bo[i];
ret = dma_resv_reserve_fences(bo->resv, 1);
if (ret) {
@@ -749,7 +746,6 @@ vc4_cl_lookup_bos(struct drm_device *dev,
struct vc4_exec_info *exec)
{
struct drm_vc4_submit_cl *args = exec->args;
- uint32_t *handles;
int ret = 0;
int i;
@@ -763,54 +759,18 @@ vc4_cl_lookup_bos(struct drm_device *dev,
return -EINVAL;
}
- exec->bo = kvmalloc_array(exec->bo_count,
- sizeof(struct drm_gem_dma_object *),
- GFP_KERNEL | __GFP_ZERO);
- if (!exec->bo) {
- DRM_ERROR("Failed to allocate validated BO pointers\n");
- return -ENOMEM;
- }
-
- handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
- if (!handles) {
- ret = -ENOMEM;
- DRM_ERROR("Failed to allocate incoming GEM handles\n");
- goto fail;
- }
-
- if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
- exec->bo_count * sizeof(uint32_t))) {
- ret = -EFAULT;
- DRM_ERROR("Failed to copy in GEM handles\n");
- goto fail;
- }
-
- spin_lock(&file_priv->table_lock);
- for (i = 0; i < exec->bo_count; i++) {
- struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
- handles[i]);
- if (!bo) {
- DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
- i, handles[i]);
- ret = -EINVAL;
- break;
- }
-
- drm_gem_object_get(bo);
- exec->bo[i] = (struct drm_gem_dma_object *)bo;
- }
- spin_unlock(&file_priv->table_lock);
+ ret = drm_gem_objects_lookup(file_priv, u64_to_user_ptr(args->bo_handles),
+ exec->bo_count, &exec->bo);
if (ret)
goto fail_put_bo;
for (i = 0; i < exec->bo_count; i++) {
- ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
+ ret = vc4_bo_inc_usecnt(to_vc4_bo(exec->bo[i]));
if (ret)
goto fail_dec_usecnt;
}
- kvfree(handles);
return 0;
fail_dec_usecnt:
@@ -823,15 +783,13 @@ fail_dec_usecnt:
* step.
*/
for (i-- ; i >= 0; i--)
- vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));
+ vc4_bo_dec_usecnt(to_vc4_bo(exec->bo[i]));
fail_put_bo:
/* Release any reference to acquired objects. */
for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
- drm_gem_object_put(&exec->bo[i]->base);
+ drm_gem_object_put(exec->bo[i]);
-fail:
- kvfree(handles);
kvfree(exec->bo);
exec->bo = NULL;
return ret;
@@ -974,10 +932,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++) {
- struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
+ struct vc4_bo *bo = to_vc4_bo(exec->bo[i]);
vc4_bo_dec_usecnt(bo);
- drm_gem_object_put(&exec->bo[i]->base);
+ drm_gem_object_put(exec->bo[i]);
}
kvfree(exec->bo);
}
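drm_gem_objects_lookup() replaces roughly forty lines of hand-rolled handle copying and IDR walking; it allocates the object array and takes a reference on every object it finds. A hedged usage sketch (cleanup on partial failure is abbreviated; see the vc4 error path above for the full version):

#include <drm/drm_gem.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static int lookup_bos(struct drm_file *file_priv, u64 uhandles, u32 count,
		      struct drm_gem_object ***bos)
{
	int i, ret;

	ret = drm_gem_objects_lookup(file_priv, u64_to_user_ptr(uhandles),
				     count, bos);
	if (ret)
		return ret;

	/* ... use the objects here, then drop the references ... */

	for (i = 0; i < count; i++)
		drm_gem_object_put((*bos)[i]);
	kvfree(*bos);
	return 0;
}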
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ea22c9bf223a..d30e4547b4c5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1466,6 +1466,12 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
if (!drm_dev_enter(drm, &idx))
goto out;
+ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+ if (ret < 0) {
+ DRM_ERROR("Failed to retain power domain: %d\n", ret);
+ goto err_dev_exit;
+ }
+
/*
* As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
* be faster than pixel clock, infinitesimally faster, tested in
@@ -1482,17 +1488,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
* Additionally, the AXI clock needs to be at least 25% of
* pixel clock, but HSM ends up being the limiting factor.
*/
- hsm_rate = max_t(unsigned long, 120000000, (tmds_char_rate / 100) * 101);
+ hsm_rate = max_t(unsigned long,
+ HSM_MIN_CLOCK_FREQ,
+ (tmds_char_rate / 100) * 101);
ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
if (ret) {
DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
- goto err_dev_exit;
- }
-
- ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
- if (ret < 0) {
- DRM_ERROR("Failed to retain power domain: %d\n", ret);
- goto err_dev_exit;
+ goto err_put_runtime_pm;
}
ret = clk_set_rate(vc4_hdmi->pixel_clock, tmds_char_rate);
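The reordering above brings the HDMI power domain up before any clock rate is programmed and gives clk_set_min_rate() failures an unwind path that drops the runtime-PM reference. The acquire/release pattern, sketched with hypothetical names:

#include <linux/clk.h>
#include <linux/pm_runtime.h>

static int hsm_configure(struct device *dev, struct clk *hsm,
			 unsigned long rate)
{
	int ret = pm_runtime_resume_and_get(dev); /* power ref first */

	if (ret < 0)
		return ret;

	ret = clk_set_min_rate(hsm, rate);        /* needs the domain up */
	if (ret)
		pm_runtime_put(dev);              /* drop the ref we took */
	return ret;
}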
@@ -3188,16 +3190,9 @@ static int vc4_hdmi_init_resources(struct drm_device *drm,
DRM_ERROR("Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_clock);
}
-
vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock;
- vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi");
- if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) {
- DRM_ERROR("Failed to get HDMI state machine clock\n");
- return PTR_ERR(vc4_hdmi->hsm_rpm_clock);
- }
-
return 0;
}
@@ -3280,12 +3275,6 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
return PTR_ERR(vc4_hdmi->hsm_clock);
}
- vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi");
- if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) {
- DRM_ERROR("Failed to get HDMI state machine clock\n");
- return PTR_ERR(vc4_hdmi->hsm_rpm_clock);
- }
-
vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb");
if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) {
DRM_ERROR("Failed to get pixel bvb clock\n");
@@ -3349,7 +3338,7 @@ static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- clk_disable_unprepare(vc4_hdmi->hsm_rpm_clock);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
return 0;
}
@@ -3362,16 +3351,7 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
unsigned long rate;
int ret;
- /*
- * The HSM clock is in the HDMI power domain, so we need to set
- * its frequency while the power domain is active so that it
- * keeps its rate.
- */
- ret = clk_set_min_rate(vc4_hdmi->hsm_rpm_clock, HSM_MIN_CLOCK_FREQ);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(vc4_hdmi->hsm_rpm_clock);
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
if (ret)
return ret;
@@ -3384,7 +3364,7 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
* case, it will lead to a silent CPU stall. Let's make sure we
* prevent such a case.
*/
- rate = clk_get_rate(vc4_hdmi->hsm_rpm_clock);
+ rate = clk_get_rate(vc4_hdmi->hsm_clock);
if (!rate) {
ret = -EINVAL;
goto err_disable_clk;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index dc3ccd8002a0..e3619836ca17 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -164,7 +164,6 @@ struct vc4_hdmi {
struct clk *cec_clock;
struct clk *pixel_clock;
struct clk *hsm_clock;
- struct clk *hsm_rpm_clock;
struct clk *audio_clock;
struct clk *pixel_bvb_clock;
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 520231af4df9..7dff3ca5af6b 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -117,7 +117,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
hindex, exec->bo_count);
return NULL;
}
- obj = exec->bo[hindex];
+ obj = to_drm_gem_dma_obj(exec->bo[hindex]);
bo = to_vc4_bo(&obj->base);
if (bo->validated_shader) {
@@ -810,7 +810,7 @@ validate_gl_shader_rec(struct drm_device *dev,
return -EINVAL;
}
- bo[i] = exec->bo[src_handles[i]];
+ bo[i] = to_drm_gem_dma_obj(exec->bo[src_handles[i]]);
if (!bo[i])
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 0ed300317f87..34cf63e6fb3d 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -39,17 +39,6 @@ struct vgem_file {
struct mutex fence_mutex;
};
-#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
-struct drm_vgem_gem_object {
- struct drm_gem_object base;
-
- struct page **pages;
- unsigned int pages_pin_count;
- struct mutex pages_lock;
-
- struct sg_table *table;
-};
-
int vgem_fence_open(struct vgem_file *file);
int vgem_fence_attach_ioctl(struct drm_device *dev,
void *data,
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index c2a879734d40..e15754178395 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -249,4 +249,5 @@ void vgem_fence_close(struct vgem_file *vfile)
{
idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
idr_destroy(&vfile->fence_idr);
+ mutex_destroy(&vfile->fence_mutex);
}
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index 51ec7c3240c9..ea06ff2aa4b4 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -11,3 +11,14 @@ config DRM_VIRTIO_GPU
QEMU based VMMs (like KVM or Xen).
If unsure say M.
+
+config DRM_VIRTIO_GPU_KMS
+ bool "Virtio GPU driver modesetting support"
+ depends on DRM_VIRTIO_GPU
+ default y
+ help
+	  Enable modesetting support for the virtio GPU driver. This can be
+ disabled in cases where only "headless" usage of the GPU is
+ required.
+
+ If unsure, say Y.
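Because the new option is a bool, code can branch on it with IS_ENABLED(), which constant-folds to 0 or 1 at compile time; the virtgpu_kms.c hunk further down relies on this so the display paths are dead-code-eliminated in headless builds. A minimal sketch:

#include <linux/kconfig.h>

static bool virtio_gpu_kms_enabled(void)
{
	/* No runtime lookup; the compiler sees a constant. */
	return IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS);
}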
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 9ea7611a9e0f..ad924a8502e9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -336,6 +336,9 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
int i, ret;
+ if (!vgdev->num_scanouts)
+ return 0;
+
ret = drmm_mode_config_init(vgdev->ddev);
if (ret)
return ret;
@@ -362,6 +365,9 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
int i;
+ if (!vgdev->num_scanouts)
+ return;
+
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ae97b98750b6..add075681e18 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -172,6 +172,10 @@ MODULE_AUTHOR("Alon Levy");
DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static const struct drm_driver driver = {
+ /*
+	 * If KMS is disabled, DRIVER_MODESET and DRIVER_ATOMIC are masked
+ * out via drm_device::driver_features:
+ */
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 27b7f14dae89..5a3b5aaed1f3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -43,11 +43,13 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
events_read, &events_read);
if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
- if (vgdev->has_edid)
- virtio_gpu_cmd_get_edids(vgdev);
- virtio_gpu_cmd_get_display_info(vgdev);
- virtio_gpu_notify(vgdev);
- drm_helper_hpd_irq_event(vgdev->ddev);
+ if (vgdev->num_scanouts) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
+ virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
+ drm_helper_hpd_irq_event(vgdev->ddev);
+ }
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
@@ -223,12 +225,15 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
num_scanouts, &num_scanouts);
vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
VIRTIO_GPU_MAX_SCANOUTS);
- if (!vgdev->num_scanouts) {
- DRM_ERROR("num_scanouts is zero\n");
- ret = -EINVAL;
- goto err_scanouts;
+
+ if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) {
+ DRM_INFO("KMS disabled\n");
+ vgdev->num_scanouts = 0;
+ vgdev->has_edid = false;
+ dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ } else {
+ DRM_INFO("number of scanouts: %d\n", num_scanouts);
}
- DRM_INFO("number of scanouts: %d\n", num_scanouts);
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
num_capsets, &num_capsets);
@@ -244,12 +249,14 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
- if (vgdev->has_edid)
- virtio_gpu_cmd_get_edids(vgdev);
- virtio_gpu_cmd_get_display_info(vgdev);
- virtio_gpu_notify(vgdev);
- wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
- 5 * HZ);
+ if (vgdev->num_scanouts) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
+ virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
+ wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
+ 5 * HZ);
+ }
return 0;
err_scanouts:
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 4c09e313bebc..a2e045f3a000 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -390,5 +390,9 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
return plane;
drm_plane_helper_add(plane, funcs);
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_enable_fb_damage_clips(plane);
+
return plane;
}
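With FB_DAMAGE_CLIPS enabled on the primary plane, userspace can attach per-frame damage rectangles and the driver can limit its uploads to them. A hedged sketch of how an atomic_update implementation might consume the clips, using the generic damage iterator helpers:

#include <drm/drm_damage_helper.h>

static void plane_flush_damage(struct drm_plane_state *old_state,
			       struct drm_plane_state *new_state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		/* transfer only clip.x1..x2 by clip.y1..y2 to the host */
	}
}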
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index a04a9b20896d..e1accfc47edf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -923,8 +923,7 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
cmd_p->context_init = cpu_to_le32(context_init);
- strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
- cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
+ strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
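strscpy() both guarantees NUL-termination and reports truncation, which is why the manual terminator write above can be dropped. A short sketch of its contract:

#include <linux/printk.h>
#include <linux/string.h>

static void set_debug_name(char *dst, size_t dst_size, const char *name)
{
	/* Returns bytes copied (excluding the NUL), or -E2BIG if the
	 * source did not fit; dst is NUL-terminated either way. */
	ssize_t n = strscpy(dst, name, dst_size);

	if (n == -E2BIG)
		pr_debug("debug name truncated\n");
}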
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 2a644f035597..e94479d9cd5b 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
- vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+ vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o \
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 4dcf2eb7aa80..82094c137855 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,40 +26,31 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
-#include "ttm_object.h"
-/**
- * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
- * vmw_buffer_object.
- *
- * @bo: Pointer to the TTM buffer object.
- * Return: Pointer to the struct vmw_buffer_object embedding the
- * TTM buffer object.
- */
-static struct vmw_buffer_object *
-vmw_buffer_object(struct ttm_buffer_object *bo)
+#include <drm/ttm/ttm_placement.h>
+
+static void vmw_bo_release(struct vmw_bo *vbo)
{
- return container_of(bo, struct vmw_buffer_object, base);
+ vmw_bo_unmap(vbo);
+ drm_gem_object_release(&vbo->tbo.base);
}
/**
- * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
- * @bo: ttm buffer object to be checked
+ * vmw_bo_free - vmw_bo destructor
*
- * Uses destroy function associated with the object to determine if this is
- * a &vmw_buffer_object.
- *
- * Returns:
- * true if the object is of &vmw_buffer_object type, false if not.
+ * @bo: Pointer to the embedded struct ttm_buffer_object
*/
-static bool bo_is_vmw(struct ttm_buffer_object *bo)
+static void vmw_bo_free(struct ttm_buffer_object *bo)
{
- return bo->destroy == &vmw_bo_bo_free ||
- bo->destroy == &vmw_gem_destroy;
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
+
+ WARN_ON(vbo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+ vmw_bo_release(vbo);
+ kfree(vbo);
}
/**
@@ -72,13 +63,13 @@ static bool bo_is_vmw(struct ttm_buffer_object *bo)
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
-int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
- struct ttm_placement *placement,
- bool interruptible)
+static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+ struct vmw_bo *buf,
+ struct ttm_placement *placement,
+ bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_buffer_object *bo = &buf->tbo;
int ret;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -87,12 +78,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
- if (buf->base.pin_count > 0)
- ret = ttm_resource_compat(bo->resource, placement)
- ? 0 : -EINVAL;
- else
- ret = ttm_bo_validate(bo, placement, &ctx);
-
+ ret = ttm_bo_validate(bo, placement, &ctx);
if (!ret)
vmw_bo_pin_reserved(buf, true);
@@ -115,11 +101,11 @@ err:
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_buffer_object *bo = &buf->tbo;
int ret;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -128,17 +114,17 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
- if (buf->base.pin_count > 0) {
- ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
- ? 0 : -EINVAL;
- goto out_unreserve;
- }
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto out_unreserve;
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_VRAM);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
out_unreserve:
if (!ret)
@@ -163,7 +149,7 @@ err:
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
bool interruptible)
{
return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
@@ -184,22 +170,13 @@ int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement placement;
- struct ttm_place place;
+ struct ttm_buffer_object *bo = &buf->tbo;
int ret = 0;
- place = vmw_vram_placement.placement[0];
- place.lpfn = PFN_UP(bo->resource->size);
- placement.num_placement = 1;
- placement.placement = &place;
- placement.num_busy_placement = 1;
- placement.busy_placement = &place;
-
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
@@ -213,16 +190,19 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
if (bo->resource->mem_type == TTM_PL_VRAM &&
bo->resource->start < PFN_UP(bo->resource->size) &&
bo->resource->start > 0 &&
- buf->base.pin_count == 0) {
+ buf->tbo.pin_count == 0) {
ctx.interruptible = false;
- (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_SYS,
+ VMW_BO_DOMAIN_SYS);
+ (void)ttm_bo_validate(bo, &buf->placement, &ctx);
}
- if (buf->base.pin_count > 0)
- ret = ttm_resource_compat(bo->resource, &placement)
- ? 0 : -EINVAL;
- else
- ret = ttm_bo_validate(bo, &placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_VRAM);
+ buf->places[0].lpfn = PFN_UP(bo->resource->size);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->resource->start != 0);
@@ -248,10 +228,10 @@ err_unlock:
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_unpin(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
bool interruptible)
{
- struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_buffer_object *bo = &buf->tbo;
int ret;
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
@@ -293,12 +273,12 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
* @pin: Whether to pin or unpin.
*
*/
-void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
struct ttm_operation_ctx ctx = { false, true };
struct ttm_place pl;
struct ttm_placement placement;
- struct ttm_buffer_object *bo = &vbo->base;
+ struct ttm_buffer_object *bo = &vbo->tbo;
uint32_t old_mem_type = bo->resource->mem_type;
int ret;
@@ -341,9 +321,9 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
* 3) Buffer object destruction
*
*/
-void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
+void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
- struct ttm_buffer_object *bo = &vbo->base;
+ struct ttm_buffer_object *bo = &vbo->tbo;
bool not_used;
void *virtual;
int ret;
@@ -366,96 +346,70 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
* @vbo: The buffer object whose map we are tearing down.
*
* This function tears down a cached map set up using
- * vmw_buffer_object_map_and_cache().
+ * vmw_bo_map_and_cache().
*/
-void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+void vmw_bo_unmap(struct vmw_bo *vbo)
{
if (vbo->map.bo == NULL)
return;
ttm_bo_kunmap(&vbo->map);
+ vbo->map.bo = NULL;
}
/**
- * vmw_bo_bo_free - vmw buffer object destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-void vmw_bo_bo_free(struct ttm_buffer_object *bo)
-{
- struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
-
- WARN_ON(vmw_bo->dirty);
- WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
- vmw_bo_unmap(vmw_bo);
- drm_gem_object_release(&bo->base);
- kfree(vmw_bo);
-}
-
-/* default destructor */
-static void vmw_bo_default_destroy(struct ttm_buffer_object *bo)
-{
- kfree(bo);
-}
-
-/**
- * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
+ * vmw_bo_init - Initialize a vmw buffer object
*
* @dev_priv: Pointer to the device private struct
- * @size: size of the BO we need
- * @placement: where to put it
- * @p_bo: resulting BO
+ * @vmw_bo: Buffer object to initialize
+ * @params: Parameters used to initialize the buffer object
+ * @destroy: The function used to delete the buffer object
+ * Returns: Zero on success, negative error code on error.
*
- * Creates and pin a simple BO for in kernel use.
*/
-int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
- struct ttm_placement *placement,
- struct ttm_buffer_object **p_bo)
+static int vmw_bo_init(struct vmw_private *dev_priv,
+ struct vmw_bo *vmw_bo,
+ struct vmw_bo_params *params,
+ void (*destroy)(struct ttm_buffer_object *))
{
struct ttm_operation_ctx ctx = {
- .interruptible = false,
+ .interruptible = params->bo_type != ttm_bo_type_kernel,
.no_wait_gpu = false
};
- struct ttm_buffer_object *bo;
+ struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
int ret;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (unlikely(!bo))
- return -ENOMEM;
+ memset(vmw_bo, 0, sizeof(*vmw_bo));
- size = ALIGN(size, PAGE_SIZE);
+ BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
+ vmw_bo->tbo.priority = 3;
+ vmw_bo->res_tree = RB_ROOT;
- drm_gem_private_object_init(vdev, &bo->base, size);
+ params->size = ALIGN(params->size, PAGE_SIZE);
+ drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
- ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, ttm_bo_type_kernel,
- placement, 0, &ctx, NULL, NULL,
- vmw_bo_default_destroy);
+ vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
+ ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
+ &vmw_bo->placement, 0, &ctx, NULL,
+ NULL, destroy);
if (unlikely(ret))
- goto error_free;
+ return ret;
- ttm_bo_pin(bo);
- ttm_bo_unreserve(bo);
- *p_bo = bo;
+ if (params->pin)
+ ttm_bo_pin(&vmw_bo->tbo);
+ ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
-
-error_free:
- kfree(bo);
- return ret;
}
int vmw_bo_create(struct vmw_private *vmw,
- size_t size, struct ttm_placement *placement,
- bool interruptible, bool pin,
- void (*bo_free)(struct ttm_buffer_object *bo),
- struct vmw_buffer_object **p_bo)
+ struct vmw_bo_params *params,
+ struct vmw_bo **p_bo)
{
int ret;
- BUG_ON(!bo_free);
-
*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
if (unlikely(!*p_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
@@ -465,9 +419,7 @@ int vmw_bo_create(struct vmw_private *vmw,
/*
* vmw_bo_init will delete the *p_bo object if it fails
*/
- ret = vmw_bo_init(vmw, *p_bo, size,
- placement, interruptible, pin,
- bo_free);
+ ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
if (unlikely(ret != 0))
goto out_error;
@@ -478,57 +430,7 @@ out_error:
}
/**
- * vmw_bo_init - Initialize a vmw buffer object
- *
- * @dev_priv: Pointer to the device private struct
- * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
- * @size: Buffer object size in bytes.
- * @placement: Initial placement.
- * @interruptible: Whether waits should be performed interruptible.
- * @pin: If the BO should be created pinned at a fixed location.
- * @bo_free: The buffer object destructor.
- * Returns: Zero on success, negative error code on error.
- *
- * Note that on error, the code will free the buffer object.
- */
-int vmw_bo_init(struct vmw_private *dev_priv,
- struct vmw_buffer_object *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interruptible, bool pin,
- void (*bo_free)(struct ttm_buffer_object *bo))
-{
- struct ttm_operation_ctx ctx = {
- .interruptible = interruptible,
- .no_wait_gpu = false
- };
- struct ttm_device *bdev = &dev_priv->bdev;
- struct drm_device *vdev = &dev_priv->drm;
- int ret;
-
- WARN_ON_ONCE(!bo_free);
- memset(vmw_bo, 0, sizeof(*vmw_bo));
- BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
- vmw_bo->base.priority = 3;
- vmw_bo->res_tree = RB_ROOT;
-
- size = ALIGN(size, PAGE_SIZE);
- drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
-
- ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device,
- placement, 0, &ctx, NULL, NULL, bo_free);
- if (unlikely(ret)) {
- return ret;
- }
-
- if (pin)
- ttm_bo_pin(&vmw_bo->base);
- ttm_bo_unreserve(&vmw_bo->base);
-
- return 0;
-}
-
-/**
- * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
* access, idling previous GPU operations on the buffer and optionally
* blocking it for further command submissions.
*
@@ -541,11 +443,11 @@ int vmw_bo_init(struct vmw_private *dev_priv,
*
* A blocking grab will be automatically released when @tfile is closed.
*/
-static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
+static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
uint32_t flags)
{
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
- struct ttm_buffer_object *bo = &vmw_bo->base;
+ struct ttm_buffer_object *bo = &vmw_bo->tbo;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
@@ -588,17 +490,17 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
uint32_t handle,
uint32_t flags)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
if (!ret) {
if (!(flags & drm_vmw_synccpu_allow_cs)) {
atomic_dec(&vmw_bo->cpu_writers);
}
- ttm_bo_put(&vmw_bo->base);
+ ttm_bo_put(&vmw_bo->tbo);
}
- drm_gem_object_put(&vmw_bo->base.base);
+ drm_gem_object_put(&vmw_bo->tbo.base);
return ret;
}
@@ -620,7 +522,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
{
struct drm_vmw_synccpu_arg *arg =
(struct drm_vmw_synccpu_arg *) data;
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -639,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
- drm_gem_object_put(&vbo->base.base);
+ drm_gem_object_put(&vbo->tbo.base);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
@@ -683,8 +585,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_unref_dmabuf_arg *arg =
(struct drm_vmw_unref_dmabuf_arg *)data;
- drm_gem_handle_delete(file_priv, arg->handle);
- return 0;
+ return drm_gem_handle_delete(file_priv, arg->handle);
}
@@ -694,14 +595,14 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
* @filp: The file the handle is registered with.
* @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
- * struct vmw_buffer_object should be placed.
+ * struct vmw_bo should be placed.
* Return: Zero on success, Negative error code on error.
*
* The vmw buffer object pointer will be refcounted (both ttm and gem)
*/
int vmw_user_bo_lookup(struct drm_file *filp,
- uint32_t handle,
- struct vmw_buffer_object **out)
+ u32 handle,
+ struct vmw_bo **out)
{
struct drm_gem_object *gobj;
@@ -712,8 +613,8 @@ int vmw_user_bo_lookup(struct drm_file *filp,
return -ESRCH;
}
- *out = gem_to_vmw_bo(gobj);
- ttm_bo_get(&(*out)->base);
+ *out = to_vmw_bo(gobj);
+ ttm_bo_get(&(*out)->tbo);
return 0;
}
@@ -734,8 +635,7 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence)
{
struct ttm_device *bdev = bo->bdev;
- struct vmw_private *dev_priv =
- container_of(bdev, struct vmw_private, bdev);
+ struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
int ret;
if (fence == NULL)
@@ -771,7 +671,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
int cpp = DIV_ROUND_UP(args->bpp, 8);
int ret;
@@ -795,7 +695,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
args->size, &args->handle,
&vbo);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put(&vbo->base.base);
+ drm_gem_object_put(&vbo->tbo.base);
return ret;
}
@@ -806,12 +706,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
*/
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
- /* Is @bo embedded in a struct vmw_buffer_object? */
- if (!bo_is_vmw(bo))
- return;
-
/* Kill any cached kernel maps before swapout */
- vmw_bo_unmap(vmw_buffer_object(bo));
+ vmw_bo_unmap(to_vmw_bo(&bo->base));
}
@@ -828,13 +724,7 @@ void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem)
{
- struct vmw_buffer_object *vbo;
-
- /* Make sure @bo is embedded in a struct vmw_buffer_object? */
- if (!bo_is_vmw(bo))
- return;
-
- vbo = container_of(bo, struct vmw_buffer_object, base);
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
/*
* Kill any cached kernel maps before move to or from VRAM.
@@ -852,3 +742,98 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
+
+static u32
+set_placement_list(struct ttm_place *pl, u32 domain)
+{
+ u32 n = 0;
+
+ /*
+ * The placements are ordered according to our preferences
+ */
+ if (domain & VMW_BO_DOMAIN_MOB) {
+ pl[n].mem_type = VMW_PL_MOB;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_GMR) {
+ pl[n].mem_type = VMW_PL_GMR;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_VRAM) {
+ pl[n].mem_type = TTM_PL_VRAM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
+ pl[n].mem_type = VMW_PL_SYSTEM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_SYS) {
+ pl[n].mem_type = TTM_PL_SYSTEM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+
+ WARN_ON(!n);
+ if (!n) {
+ pl[n].mem_type = TTM_PL_SYSTEM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ return n;
+}
+
+void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
+{
+ struct ttm_device *bdev = bo->tbo.bdev;
+ struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
+ struct ttm_placement *pl = &bo->placement;
+ bool mem_compatible = false;
+ u32 i;
+
+ pl->placement = bo->places;
+ pl->num_placement = set_placement_list(bo->places, domain);
+
+ if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
+ for (i = 0; i < pl->num_placement; ++i) {
+ if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
+ bo->tbo.resource->mem_type == pl->placement[i].mem_type)
+ mem_compatible = true;
+ }
+ if (!mem_compatible)
+ drm_warn(&vmw->drm,
+ "%s: Incompatible transition from "
+ "bo->base.resource->mem_type = %u to domain = %u\n",
+ __func__, bo->tbo.resource->mem_type, domain);
+ }
+
+ pl->busy_placement = bo->busy_places;
+ pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
+}
+
+void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
+{
+ struct ttm_device *bdev = bo->tbo.bdev;
+ struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
+ u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;
+
+ if (vmw->has_mob)
+ domain = VMW_BO_DOMAIN_MOB;
+
+ vmw_bo_placement_set(bo, domain, domain);
+}
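The domain flags replace the fixed global ttm_placement tables (vmw_vram_gmr_placement and friends): callers now state intent and vmw_bo_placement_set() builds the ordered place list. A hedged usage sketch matching the pin-in-VRAM-or-GMR hunk above:

#include "vmwgfx_bo.h"

static int validate_preferring_gmr(struct vmw_bo *vbo,
				   struct ttm_operation_ctx *ctx)
{
	/* Prefer GMR, fall back to VRAM; under memory pressure the
	 * busy list restricts the BO to GMR only. */
	vmw_bo_placement_set(vbo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR);
	return ttm_bo_validate(&vbo->tbo, &vbo->placement, ctx);
}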
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
new file mode 100644
index 000000000000..50a836e70994
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_BO_H
+#define VMWGFX_BO_H
+
+#include "device_include/svga_reg.h"
+
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include <linux/rbtree_types.h>
+#include <linux/types.h>
+
+struct vmw_bo_dirty;
+struct vmw_fence_obj;
+struct vmw_private;
+struct vmw_resource;
+
+enum vmw_bo_domain {
+ VMW_BO_DOMAIN_SYS = BIT(0),
+ VMW_BO_DOMAIN_WAITABLE_SYS = BIT(1),
+ VMW_BO_DOMAIN_VRAM = BIT(2),
+ VMW_BO_DOMAIN_GMR = BIT(3),
+ VMW_BO_DOMAIN_MOB = BIT(4),
+};
+
+struct vmw_bo_params {
+ u32 domain;
+ u32 busy_domain;
+ enum ttm_bo_type bo_type;
+ size_t size;
+ bool pin;
+};
+
+/**
+ * struct vmw_bo - TTM buffer object with vmwgfx additions
+ * @tbo: The TTM buffer object
+ * @placement: The preferred placement for this buffer object
+ * @places: The chosen places for the preferred placement.
+ * @busy_places: Chosen busy places for the preferred placement
+ * @map: Kmap object for semi-persistent mappings
+ * @res_tree: RB tree of resources using this buffer object as a backing MOB
+ * @res_prios: Eviction priority counts for attached resources
+ * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
+ * increased. May be decreased without reservation.
+ * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
+ * @dirty: structure for user-space dirty-tracking
+ */
+struct vmw_bo {
+ struct ttm_buffer_object tbo;
+
+ struct ttm_placement placement;
+ struct ttm_place places[5];
+ struct ttm_place busy_places[5];
+
+ /* Protected by reservation */
+ struct ttm_bo_kmap_obj map;
+
+ struct rb_root res_tree;
+ u32 res_prios[TTM_MAX_BO_PRIORITY];
+
+ atomic_t cpu_writers;
+ /* Not ref-counted. Protected by binding_mutex */
+ struct vmw_resource *dx_query_ctx;
+ struct vmw_bo_dirty *dirty;
+};
+
+void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
+void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);
+
+int vmw_bo_create(struct vmw_private *dev_priv,
+ struct vmw_bo_params *params,
+ struct vmw_bo **p_bo);
+
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_bo *buf,
+ bool interruptible);
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_bo *buf,
+ bool interruptible);
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_bo *bo,
+ bool interruptible);
+void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin);
+int vmw_bo_unpin(struct vmw_private *vmw_priv,
+ struct vmw_bo *bo,
+ bool interruptible);
+
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
+ SVGAGuestPtr *ptr);
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence);
+
+void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
+void vmw_bo_unmap(struct vmw_bo *vbo);
+
+void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_resource *mem);
+void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+
+int vmw_user_bo_lookup(struct drm_file *filp,
+ u32 handle,
+ struct vmw_bo **out);
+/**
+ * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
+ * according to attached resources
+ * @vbo: The struct vmw_bo
+ */
+static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
+{
+ int i = ARRAY_SIZE(vbo->res_prios);
+
+ while (i--) {
+ if (vbo->res_prios[i]) {
+ vbo->tbo.priority = i;
+ return;
+ }
+ }
+
+ vbo->tbo.priority = 3;
+}
+
+/**
+ * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
+ * eviction priority
+ * @vbo: The struct vmw_bo
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
+{
+ if (vbo->res_prios[prio]++ == 0)
+ vmw_bo_prio_adjust(vbo);
+}
+
+/**
+ * vmw_bo_used_prio_del - Notify a buffer object of a resource with a certain
+ * priority being removed
+ * @vbo: The struct vmw_bo
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio)
+{
+ if (--vbo->res_prios[prio] == 0)
+ vmw_bo_prio_adjust(vbo);
+}
+
+static inline void vmw_bo_unreference(struct vmw_bo **buf)
+{
+ struct vmw_bo *tmp_buf = *buf;
+
+ *buf = NULL;
+ if (tmp_buf)
+ ttm_bo_put(&tmp_buf->tbo);
+}
+
+static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
+{
+ ttm_bo_get(&buf->tbo);
+ return buf;
+}
+
+static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
+{
+ return container_of((gobj), struct vmw_bo, tbo.base);
+}
+
+#endif // VMWGFX_BO_H
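The parameter struct replaces the long argument lists of the removed vmw_bo_create_kernel()/vmw_bo_init() pair. A hedged creation sketch for a pinned kernel BO, mirroring the vmwgfx_cmdbuf.c hunk below:

#include "vmwgfx_bo.h"

static int create_pinned_sys_bo(struct vmw_private *dev_priv,
				struct vmw_bo **out)
{
	struct vmw_bo_params params = {
		.domain = VMW_BO_DOMAIN_SYS,
		.busy_domain = VMW_BO_DOMAIN_SYS,
		.bo_type = ttm_bo_type_kernel,
		.size = PAGE_SIZE,
		.pin = true,
	};

	return vmw_bo_create(dev_priv, &params, out);
}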
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 162dfeb1cc5a..195ff8792e5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,13 +24,13 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-
-#include <linux/sched/signal.h>
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_devcaps.h"
#include <drm/ttm/ttm_placement.h>
-#include "vmwgfx_drv.h"
-#include "vmwgfx_devcaps.h"
+#include <linux/sched/signal.h>
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
@@ -567,7 +567,7 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
- struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
+ struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
@@ -613,7 +613,7 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
- struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
+ struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 2b843ff4b437..94e8982f5616 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2015-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,12 +25,13 @@
*
**************************************************************************/
-#include <linux/dmapool.h>
-#include <linux/pci.h>
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo.h>
-#include "vmwgfx_drv.h"
+#include <linux/dmapool.h>
+#include <linux/pci.h>
/*
* Size of inline command buffers. Try to make sure that a page size is a
@@ -79,7 +80,6 @@ struct vmw_cmdbuf_context {
* frees are protected by @lock.
* @cmd_space: Buffer object for the command buffer space, unless we were
* able to make a contigous coherent DMA memory allocation, @handle. Immutable.
- * @map_obj: Mapping state for @cmd_space. Immutable.
* @map: Pointer to command buffer space. May be a mapped buffer object or
* a contigous coherent DMA memory allocation. Immutable.
* @cur: Command buffer for small kernel command submissions. Protected by
@@ -116,8 +116,7 @@ struct vmw_cmdbuf_man {
struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
struct list_head error;
struct drm_mm mm;
- struct ttm_buffer_object *cmd_space;
- struct ttm_bo_kmap_obj map_obj;
+ struct vmw_bo *cmd_space;
u8 *map;
struct vmw_cmdbuf_header *cur;
size_t cur_pos;
@@ -888,7 +887,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
header->cmd = man->map + offset;
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
- cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
+ cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
cb_hdr->ptr.mob.mobOffset = offset;
} else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
@@ -1221,7 +1220,6 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
struct vmw_private *dev_priv = man->dev_priv;
- bool dummy;
int ret;
if (man->has_pool)
@@ -1234,6 +1232,13 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
if (man->map) {
man->using_mob = false;
} else {
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
+ .bo_type = ttm_bo_type_kernel,
+ .size = size,
+ .pin = true
+ };
/*
* DMA memory failed. If we can have command buffers in a
* MOB, try to use that instead. Note that this will
@@ -1244,19 +1249,12 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
!dev_priv->has_mob)
return -ENOMEM;
- ret = vmw_bo_create_kernel(dev_priv, size,
- &vmw_mob_placement,
- &man->cmd_space);
+ ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
if (ret)
return ret;
- man->using_mob = true;
- ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
- &man->map_obj);
- if (ret)
- goto out_no_map;
-
- man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+ man->map = vmw_bo_map_and_cache(man->cmd_space);
+ man->using_mob = man->map;
}
man->size = size;
@@ -1276,14 +1274,6 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
(man->using_mob) ? "MOB" : "DMA");
return 0;
-
-out_no_map:
- if (man->using_mob) {
- ttm_bo_put(man->cmd_space);
- man->cmd_space = NULL;
- }
-
- return ret;
}
/**
@@ -1382,14 +1372,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
man->has_pool = false;
man->default_size = VMW_CMDBUF_INLINE_SIZE;
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
- if (man->using_mob) {
- (void) ttm_bo_kunmap(&man->map_obj);
- ttm_bo_put(man->cmd_space);
- man->cmd_space = NULL;
- } else {
+ if (man->using_mob)
+ vmw_bo_unreference(&man->cmd_space);
+ else
dma_free_coherent(man->dev_priv->drm.dev,
man->size, man->map, man->handle);
- }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index e0f48cd9529b..ecc503e42790 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,9 +27,10 @@
#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
-#include "vmwgfx_binding.h"
struct vmw_user_context {
struct ttm_base_object base;
@@ -38,7 +39,7 @@ struct vmw_user_context {
struct vmw_cmdbuf_res_manager *man;
struct vmw_resource *cotables[SVGA_COTABLE_MAX];
spinlock_t cotable_lock;
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
};
static void vmw_user_context_free(struct vmw_resource *res);
@@ -72,10 +73,11 @@ const struct vmw_user_resource_conv *user_context_converter =
static const struct vmw_res_func vmw_legacy_context_func = {
.res_type = vmw_res_context,
- .needs_backup = false,
+ .needs_guest_memory = false,
.may_evict = false,
.type_name = "legacy contexts",
- .backup_placement = NULL,
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
.create = NULL,
.destroy = NULL,
.bind = NULL,
@@ -84,12 +86,13 @@ static const struct vmw_res_func vmw_legacy_context_func = {
static const struct vmw_res_func vmw_gb_context_func = {
.res_type = vmw_res_context,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "guest backed contexts",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_context_create,
.destroy = vmw_gb_context_destroy,
.bind = vmw_gb_context_bind,
@@ -98,12 +101,13 @@ static const struct vmw_res_func vmw_gb_context_func = {
static const struct vmw_res_func vmw_dx_context_func = {
.res_type = vmw_res_dx_context,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "dx contexts",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_context_create,
.destroy = vmw_dx_context_destroy,
.bind = vmw_dx_context_bind,
@@ -182,7 +186,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
- res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+ res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
sizeof(SVGAGBContextData));
ret = vmw_resource_init(dev_priv, res, true,
res_free,
@@ -354,8 +358,8 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->resource->start;
- cmd->body.validContents = res->backup_dirty;
- res->backup_dirty = false;
+ cmd->body.validContents = res->guest_memory_dirty;
+ res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -521,8 +525,8 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->resource->start;
- cmd->body.validContents = res->backup_dirty;
- res->backup_dirty = false;
+ cmd->body.validContents = res->guest_memory_dirty;
+ res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -853,7 +857,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
* specified in the parameter. 0 otherwise.
*/
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
- struct vmw_buffer_object *mob)
+ struct vmw_bo *mob)
{
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
@@ -885,7 +889,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
*
* @ctx_res: The context resource
*/
-struct vmw_buffer_object *
+struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
struct vmw_user_context *uctx =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index b78a10312fad..c0b24d1cacbf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2014-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -30,13 +30,14 @@
* whenever the backing MOB is evicted.
*/
-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
+#include <drm/ttm/ttm_placement.h>
+
/**
* struct vmw_cotable - Context Object Table resource
*
@@ -130,12 +131,13 @@ static int vmw_cotable_destroy(struct vmw_resource *res);
static const struct vmw_res_func vmw_cotable_func = {
.res_type = vmw_res_cotable,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "context guest backed object tables",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_cotable_create,
.destroy = vmw_cotable_destroy,
.bind = vmw_cotable_bind,
@@ -180,7 +182,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_private *dev_priv = res->dev_priv;
- struct ttm_buffer_object *bo = &res->backup->base;
+ struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetCOTable body;
@@ -228,7 +230,7 @@ static int vmw_cotable_bind(struct vmw_resource *res,
* take the opportunity to correct the value here so that it's not
* misused in the future.
*/
- val_buf->bo = &res->backup->base;
+ val_buf->bo = &res->guest_memory_bo->tbo;
return vmw_cotable_unscrub(res);
}
@@ -289,7 +291,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
cmd0->body.cid = vcotbl->ctx->id;
cmd0->body.type = vcotbl->type;
cmd1 = (void *) &cmd0[1];
- vcotbl->size_read_back = res->backup_size;
+ vcotbl->size_read_back = res->guest_memory_size;
}
cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd1->header.size = sizeof(cmd1->body);
@@ -371,12 +373,12 @@ static int vmw_cotable_readback(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
- vcotbl->size_read_back = res->backup_size;
+ vcotbl->size_read_back = res->guest_memory_size;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_bo_fence_single(&res->backup->base, fence);
+ vmw_bo_fence_single(&res->guest_memory_bo->tbo, fence);
vmw_fence_obj_unreference(&fence);
return 0;
@@ -399,14 +401,21 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
struct ttm_operation_ctx ctx = { false, false };
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res);
- struct vmw_buffer_object *buf, *old_buf = res->backup;
- struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
- size_t old_size = res->backup_size;
+ struct vmw_bo *buf, *old_buf = res->guest_memory_bo;
+ struct ttm_buffer_object *bo, *old_bo = &res->guest_memory_bo->tbo;
+ size_t old_size = res->guest_memory_size;
size_t old_size_read_back = vcotbl->size_read_back;
size_t cur_size_read_back;
struct ttm_bo_kmap_obj old_map, new_map;
int ret;
size_t i;
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
+ .bo_type = ttm_bo_type_device,
+ .size = new_size,
+ .pin = true
+ };
MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);
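vmw_bo_create() now takes a single parameter block instead of a positional argument list. The pattern, as used here and at the other call sites in this patch (the field set is inferred from the hunks; treat it as a sketch rather than the full struct definition):

	struct vmw_bo *vbo;
	struct vmw_bo_params params = {
		.domain      = VMW_BO_DOMAIN_MOB,
		.busy_domain = VMW_BO_DOMAIN_MOB,
		.bo_type     = ttm_bo_type_device,
		.size        = new_size,
		.pin         = true,	/* created pinned so tryreserve succeeds */
	};

	ret = vmw_bo_create(dev_priv, &params, &vbo);

The designated initializer replaces the old (size, placement, interruptible, pin, bo_free) argument list and drops the per-caller destructor pointer entirely.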
@@ -423,14 +432,13 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
- true, true, vmw_bo_bo_free, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
}
- bo = &buf->base;
+ bo = &buf->tbo;
WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
ret = ttm_bo_wait(old_bo, false, false);
@@ -464,15 +472,18 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
}
/* Unpin new buffer, and switch backup buffers. */
- ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_MOB,
+ VMW_BO_DOMAIN_MOB);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed validating new COTable backup buffer.\n");
goto out_wait;
}
vmw_resource_mob_detach(res);
- res->backup = buf;
- res->backup_size = new_size;
+ res->guest_memory_bo = buf;
+ res->guest_memory_size = new_size;
vcotbl->size_read_back = cur_size_read_back;
/*
@@ -482,8 +493,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
ret = vmw_cotable_unscrub(res);
if (ret) {
DRM_ERROR("Failed switching COTable backup buffer.\n");
- res->backup = old_buf;
- res->backup_size = old_size;
+ res->guest_memory_bo = old_buf;
+ res->guest_memory_size = old_size;
vcotbl->size_read_back = old_size_read_back;
vmw_resource_mob_attach(res);
goto out_wait;
@@ -498,7 +509,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
if (unlikely(ret))
goto out_wait;
- /* Release the pin acquired in vmw_bo_init */
+ /* Release the pin acquired in vmw_bo_create */
ttm_bo_unpin(bo);
MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
@@ -533,7 +544,7 @@ out_done:
static int vmw_cotable_create(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
- size_t new_size = res->backup_size;
+ size_t new_size = res->guest_memory_size;
size_t needed_size;
int ret;
@@ -542,7 +553,7 @@ static int vmw_cotable_create(struct vmw_resource *res)
while (needed_size > new_size)
new_size *= 2;
- if (likely(new_size <= res->backup_size)) {
+ if (likely(new_size <= res->guest_memory_size)) {
if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
ret = vmw_cotable_unscrub(res);
if (ret)
@@ -606,12 +617,12 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&vcotbl->resource_list);
vcotbl->res.id = type;
- vcotbl->res.backup_size = PAGE_SIZE;
+ vcotbl->res.guest_memory_size = PAGE_SIZE;
num_entries = PAGE_SIZE / co_info[type].size;
if (num_entries < co_info[type].min_initial_entries) {
- vcotbl->res.backup_size = co_info[type].min_initial_entries *
+ vcotbl->res.guest_memory_size = co_info[type].min_initial_entries *
co_info[type].size;
- vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size);
+ vcotbl->res.guest_memory_size = PFN_ALIGN(vcotbl->res.guest_memory_size);
}
vcotbl->scrubbed = true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9ad28346aff7..2588615a2a38 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -28,9 +28,10 @@
#include "vmwgfx_drv.h"
+#include "vmwgfx_bo.h"
+#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_mksstat.h"
-#include "vmwgfx_binding.h"
#include "ttm_object.h"
#include <drm/drm_aperture.h>
@@ -386,27 +387,32 @@ static void vmw_print_sm_type(struct vmw_private *dev_priv)
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_kernel,
+ .size = PAGE_SIZE,
+ .pin = true
+ };
/*
* Create the vbo as pinned, so that a tryreserve will
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
- ret = vmw_bo_create(dev_priv, PAGE_SIZE,
- &vmw_sys_placement, false, true,
- &vmw_bo_bo_free, &vbo);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
+ ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0);
vmw_bo_pin_reserved(vbo, true);
- ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
+ ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
@@ -415,7 +421,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
ttm_bo_kunmap(&map);
}
vmw_bo_pin_reserved(vbo, false);
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
@@ -1565,7 +1571,7 @@ static const struct file_operations vmwgfx_driver_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = vmw_unlocked_ioctl,
- .mmap = vmw_mmap,
+ .mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#if defined(CONFIG_COMPAT)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 203fa32cd4c1..fb8f0c0642c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -117,32 +117,6 @@ struct vmwgfx_hash_item {
unsigned long key;
};
-/**
- * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
- * @base: The TTM buffer object
- * @res_tree: RB tree of resources using this buffer object as a backing MOB
- * @base_mapped_count: ttm BO mapping count; used by KMS atomic helpers.
- * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
- * increased. May be decreased without reservation.
- * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
- * @map: Kmap object for semi-persistent mappings
- * @res_prios: Eviction priority counts for attached resources
- * @dirty: structure for user-space dirty-tracking
- */
-struct vmw_buffer_object {
- struct ttm_buffer_object base;
- struct rb_root res_tree;
- /* For KMS atomic helpers: ttm bo mapping count */
- atomic_t base_mapped_count;
-
- atomic_t cpu_writers;
- /* Not ref-counted. Protected by binding_mutex */
- struct vmw_resource *dx_query_ctx;
- /* Protected by reservation */
- struct ttm_bo_kmap_obj map;
- u32 res_prios[TTM_MAX_BO_PRIORITY];
- struct vmw_bo_dirty *dirty;
-};
/**
* struct vmw_validate_buffer - Carries validation info about buffers.
@@ -168,21 +142,23 @@ struct vmw_res_func;
* @kref: For refcounting.
* @dev_priv: Pointer to the device private for this resource. Immutable.
* @id: Device id. Protected by @dev_priv::resource_lock.
- * @backup_size: Backup buffer size. Immutable.
- * @res_dirty: Resource contains data not yet in the backup buffer. Protected
- * by resource reserved.
- * @backup_dirty: Backup buffer contains data not yet in the HW resource.
+ * @guest_memory_size: Guest memory buffer size. Immutable.
+ * @res_dirty: Resource contains data not yet in the guest memory buffer.
* Protected by resource reserved.
+ * @guest_memory_dirty: Guest memory buffer contains data not yet in the HW
+ * resource. Protected by resource reserved.
* @coherent: Emulate coherency by tracking vm accesses.
- * @backup: The backup buffer if any. Protected by resource reserved.
- * @backup_offset: Offset into the backup buffer if any. Protected by resource
- * reserved. Note that only a few resource types can have a @backup_offset
- * different from zero.
+ * @guest_memory_bo: The guest memory buffer if any. Protected by resource
+ * reserved.
+ * @guest_memory_offset: Offset into the guest memory buffer if any. Protected
+ * by resource reserved. Note that only a few resource types can have a
+ * @guest_memory_offset different from zero.
* @pin_count: The pin count for this resource. A pinned resource has a
* pin-count greater than zero. It is not on the resource LRU lists and its
- * backup buffer is pinned. Hence it can't be evicted.
+ * guest memory buffer is pinned. Hence it can't be evicted.
* @func: Method vtable for this resource. Immutable.
- * @mob_node; Node for the MOB backup rbtree. Protected by @backup reserved.
+ * @mob_node: Node for the MOB guest memory rbtree. Protected by
+ * @guest_memory_bo reserved.
* @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
* @binding_head: List head for the context binding list. Protected by
* the @dev_priv::binding_mutex
@@ -190,18 +166,20 @@ struct vmw_res_func;
* @hw_destroy: Callback to destroy the resource on the device, as part of
* resource destruction.
*/
+struct vmw_bo;
struct vmw_resource_dirty;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
u32 used_prio;
- unsigned long backup_size;
+ unsigned long guest_memory_size;
u32 res_dirty : 1;
- u32 backup_dirty : 1;
+ u32 guest_memory_dirty : 1;
u32 coherent : 1;
- struct vmw_buffer_object *backup;
- unsigned long backup_offset;
+ struct vmw_bo *guest_memory_bo;
+ unsigned long guest_memory_offset;
unsigned long pin_count;
const struct vmw_res_func *func;
struct rb_node mob_node;
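For reviewers tracking the conversion across files, the struct vmw_resource rename applied throughout this series maps the old fields onto the new ones:

	/* backup        -> guest_memory_bo     (a struct vmw_bo * now)
	 * backup_size   -> guest_memory_size
	 * backup_dirty  -> guest_memory_dirty
	 * backup_offset -> guest_memory_offset */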
@@ -446,7 +424,7 @@ struct vmw_sw_context{
struct drm_file *filp;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
- struct vmw_buffer_object *cur_query_bo;
+ struct vmw_bo *cur_query_bo;
struct list_head bo_relocations;
struct list_head res_relocations;
uint32_t *buf_start;
@@ -458,7 +436,7 @@ struct vmw_sw_context{
struct list_head staged_cmd_res;
struct list_head ctx_list;
struct vmw_ctx_validation_info *dx_ctx_node;
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man;
struct vmw_validation_context *ctx;
@@ -492,7 +470,7 @@ struct vmw_otable_batch {
unsigned num_otables;
struct vmw_otable *otables;
struct vmw_resource *context;
- struct ttm_buffer_object *otable_bo;
+ struct vmw_bo *otable_bo;
};
enum {
@@ -632,8 +610,8 @@ struct vmw_private {
* are protected by the cmdbuf mutex.
*/
- struct vmw_buffer_object *dummy_query_bo;
- struct vmw_buffer_object *pinned_bo;
+ struct vmw_bo *dummy_query_bo;
+ struct vmw_bo *pinned_bo;
uint32_t query_cid;
uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
@@ -677,11 +655,6 @@ struct vmw_private {
#endif
};
-static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj)
-{
- return container_of((gobj), struct vmw_buffer_object, base.base);
-}
-
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
return container_of(res, struct vmw_surface, res);
@@ -692,6 +665,11 @@ static inline struct vmw_private *vmw_priv(struct drm_device *dev)
return (struct vmw_private *)dev->dev_private;
}
+static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
+{
+ return container_of(bdev, struct vmw_private, bdev);
+}
+
static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
return (struct vmw_fpriv *)file_priv->driver_priv;
@@ -825,7 +803,7 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
- struct vmw_buffer_object **out_buf);
+ struct vmw_bo **out_buf);
extern int vmw_user_resource_lookup_handle(
struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -844,20 +822,20 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
extern void vmw_resource_unreserve(struct vmw_resource *res,
bool dirty_set,
bool dirty,
- bool switch_backup,
- struct vmw_buffer_object *new_backup,
- unsigned long new_backup_offset);
+ bool switch_guest_memory,
+ struct vmw_bo *new_guest_memory,
+ unsigned long new_guest_memory_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *old_mem,
struct ttm_resource *new_mem);
-extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
-extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
-extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
+int vmw_query_readback_all(struct vmw_bo *dx_query_mob);
+void vmw_resource_evict_all(struct vmw_private *dev_priv);
+void vmw_resource_unbind_list(struct vmw_bo *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
pgoff_t end);
-int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
pgoff_t end, pgoff_t *num_prefault);
/**
@@ -872,117 +850,15 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
}
/**
- * Buffer object helper functions - vmwgfx_bo.c
- */
-extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
- struct vmw_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible);
-extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
- bool interruptible);
-extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
- bool interruptible);
-extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
- struct vmw_buffer_object *bo,
- bool interruptible);
-extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
- struct vmw_buffer_object *bo,
- bool interruptible);
-extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
- SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
-extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
- unsigned long size,
- struct ttm_placement *placement,
- struct ttm_buffer_object **p_bo);
-extern int vmw_bo_create(struct vmw_private *dev_priv,
- size_t size, struct ttm_placement *placement,
- bool interruptible, bool pin,
- void (*bo_free)(struct ttm_buffer_object *bo),
- struct vmw_buffer_object **p_bo);
-extern int vmw_bo_init(struct vmw_private *dev_priv,
- struct vmw_buffer_object *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interruptible, bool pin,
- void (*bo_free)(struct ttm_buffer_object *bo));
-extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_user_bo_lookup(struct drm_file *filp,
- uint32_t handle,
- struct vmw_buffer_object **out);
-extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
- struct vmw_fence_obj *fence);
-extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
-extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
-extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_resource *mem);
-extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-
-/**
- * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
- * according to attached resources
- * @vbo: The struct vmw_buffer_object
- */
-static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
-{
- int i = ARRAY_SIZE(vbo->res_prios);
-
- while (i--) {
- if (vbo->res_prios[i]) {
- vbo->base.priority = i;
- return;
- }
- }
-
- vbo->base.priority = 3;
-}
-
-/**
- * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
- * eviction priority
- * @vbo: The struct vmw_buffer_object
- * @prio: The resource priority
- *
- * After being notified, the code assigns the highest resource eviction priority
- * to the backing buffer object (mob).
- */
-static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
-{
- if (vbo->res_prios[prio]++ == 0)
- vmw_bo_prio_adjust(vbo);
-}
-
-/**
- * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
- * priority being removed
- * @vbo: The struct vmw_buffer_object
- * @prio: The resource priority
- *
- * After being notified, the code assigns the highest resource eviction priority
- * to the backing buffer object (mob).
- */
-static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
-{
- if (--vbo->res_prios[prio] == 0)
- vmw_bo_prio_adjust(vbo);
-}
-
-/**
* GEM related functionality - vmwgfx_gem.c
*/
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
uint32_t *handle,
- struct vmw_buffer_object **p_vbo);
+ struct vmw_bo **p_vbo);
extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-extern void vmw_gem_destroy(struct ttm_buffer_object *bo);
extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
/**
@@ -1056,29 +932,20 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
}
/**
- * TTM glue - vmwgfx_ttm_glue.c
- */
-
-extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
-
-/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
-extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_srf_placement;
-extern struct ttm_placement vmw_mob_placement;
-extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
-extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
- unsigned long bo_size,
- struct ttm_buffer_object **bo_p);
+int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
+ size_t bo_size,
+ u32 domain,
+ struct vmw_bo **bo_p);
extern void vmw_piter_start(struct vmw_piter *viter,
const struct vmw_sg_table *vsgt,
@@ -1297,8 +1164,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
- struct vmw_buffer_object *mob);
-extern struct vmw_buffer_object *
+ struct vmw_bo *mob);
+extern struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
@@ -1523,12 +1390,12 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/* Resource dirtying - vmwgfx_page_dirty.c */
-void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
-int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
+void vmw_bo_dirty_scan(struct vmw_bo *vbo);
+int vmw_bo_dirty_add(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
-void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
-void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+void vmw_bo_dirty_release(struct vmw_bo *vbo);
+void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
@@ -1561,22 +1428,6 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
return srf;
}
-static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
-{
- struct vmw_buffer_object *tmp_buf = *buf;
-
- *buf = NULL;
- if (tmp_buf != NULL)
- ttm_bo_put(&tmp_buf->base);
-}
-
-static inline struct vmw_buffer_object *
-vmw_bo_reference(struct vmw_buffer_object *buf)
-{
- ttm_bo_get(&buf->base);
- return buf;
-}
-
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
atomic_inc(&dev_priv->num_fifo_resources);
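The inline refcount helpers deleted from vmwgfx_drv.h above are expected to reappear in the new vmwgfx_bo.h (not part of this excerpt), rewritten against the embedded tbo. A sketch of the reference side, assuming a straight rename of the removed vmw_buffer_object variant:

	static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
	{
		ttm_bo_get(&buf->tbo);
		return buf;
	}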
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0590bb22c73a..6b9aa2b4ef54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,17 +24,17 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-#include <linux/sync_file.h>
-#include <linux/hashtable.h>
-
+#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
-#include "vmwgfx_reg.h"
+#include "vmwgfx_mksstat.h"
+#include "vmwgfx_so.h"
+
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
-#include "vmwgfx_so.h"
-#include "vmwgfx_binding.h"
-#include "vmwgfx_mksstat.h"
+#include <linux/sync_file.h>
+#include <linux/hashtable.h>
/*
* Helper macro to get dx_ctx_node if available otherwise print an error
@@ -65,7 +65,7 @@
*/
struct vmw_relocation {
struct list_head head;
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
union {
SVGAMobId *mob_loc;
SVGAGuestPtr *location;
@@ -149,7 +149,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
- struct vmw_buffer_object **vmw_bo_p);
+ struct vmw_bo **vmw_bo_p);
/**
* vmw_ptr_diff - Compute the offset from a to b in bytes
*
@@ -475,12 +475,16 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (has_sm4_context(dev_priv) &&
vmw_res_type(ctx) == vmw_res_dx_context) {
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
- if (dx_query_mob)
+ if (dx_query_mob) {
+ vmw_bo_placement_set(dx_query_mob,
+ VMW_BO_DOMAIN_MOB,
+ VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx,
- dx_query_mob, true, false);
+ dx_query_mob);
+ }
}
mutex_unlock(&dev_priv->binding_mutex);
@@ -596,7 +600,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
return ret;
if (sw_context->dx_query_mob) {
- struct vmw_buffer_object *expected_dx_query_mob;
+ struct vmw_bo *expected_dx_query_mob;
expected_dx_query_mob =
vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -703,7 +707,7 @@ res_check_done:
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
@@ -718,7 +722,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
- cmd->body.mobid = dx_query_mob->base.resource->start;
+ cmd->body.mobid = dx_query_mob->tbo.resource->start;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
@@ -1017,7 +1021,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* after successful submission of the current command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- struct vmw_buffer_object *new_query_bo,
+ struct vmw_bo *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
@@ -1029,24 +1033,24 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
+ if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
+ vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
- sw_context->cur_query_bo,
- dev_priv->has_mob, false);
+ sw_context->cur_query_bo);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
+ vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
- dev_priv->dummy_query_bo,
- dev_priv->has_mob, false);
+ dev_priv->dummy_query_bo);
if (unlikely(ret != 0))
return ret;
}
@@ -1145,9 +1149,9 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
- struct vmw_buffer_object **vmw_bo_p)
+ struct vmw_bo **vmw_bo_p)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
@@ -1158,9 +1162,10 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo);
}
- ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
- ttm_bo_put(&vmw_bo->base);
- drm_gem_object_put(&vmw_bo->base.base);
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
+ ttm_bo_put(&vmw_bo->tbo);
+ drm_gem_object_put(&vmw_bo->tbo.base);
if (unlikely(ret != 0))
return ret;
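The validation call loses its two boolean arguments; callers now state the intended placement explicitly first. Condensed idiom, restating the hunk without the diff markers:

	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);

The old signature, vmw_validation_add_bo(ctx, vbo, as_mob, cpu_blit), encoded placement policy inside the validation code; the new one keeps that policy at the call site.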
@@ -1200,9 +1205,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
- struct vmw_buffer_object **vmw_bo_p)
+ struct vmw_bo **vmw_bo_p)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
@@ -1213,9 +1218,11 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo);
}
- ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
- ttm_bo_put(&vmw_bo->base);
- drm_gem_object_put(&vmw_bo->base.base);
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
+ ttm_bo_put(&vmw_bo->tbo);
+ drm_gem_object_put(&vmw_bo->tbo.base);
if (unlikely(ret != 0))
return ret;
@@ -1280,7 +1287,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
int ret;
cmd = container_of(header, typeof(*cmd), header);
@@ -1363,7 +1370,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
int ret;
@@ -1393,7 +1400,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
int ret;
@@ -1439,7 +1446,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
int ret;
@@ -1467,7 +1474,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
int ret;
@@ -1504,7 +1511,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_buffer_object *vmw_bo = NULL;
+ struct vmw_bo *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
int ret;
@@ -1528,7 +1535,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
return ret;
/* Make sure DMA doesn't cross BO boundaries. */
- bo_size = vmw_bo->base.base.size;
+ bo_size = vmw_bo->tbo.base.size;
if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
VMW_DEBUG_USER("Invalid DMA offset.\n");
return -EINVAL;
@@ -1551,7 +1558,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
return 0;
}
@@ -1670,7 +1677,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
struct {
uint32_t header;
@@ -1701,7 +1708,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_resource *res, uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
void *info;
int ret;
@@ -3754,7 +3761,7 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
struct ttm_buffer_object *bo;
list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
- bo = &reloc->vbo->base;
+ bo = &reloc->vbo->tbo;
switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
reloc->location->offset += bo->resource->start << PAGE_SHIFT;
@@ -4364,13 +4371,17 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
- false);
+ vmw_bo_placement_set(dev_priv->pinned_bo,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
if (ret)
goto out_no_reserve;
- ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
- false);
+ vmw_bo_placement_set(dev_priv->dummy_query_bo,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
if (ret)
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 66cc35dc223e..2a0cda324703 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 4d2c28e39f4e..c0da89e16e6f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
- * Copyright 2021 VMware, Inc.
+ * Copyright 2021-2023 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -24,31 +24,17 @@
*
*/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "drm/drm_prime.h"
#include "drm/drm_gem_ttm_helper.h"
-/**
- * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
- * vmw_buffer_object.
- *
- * @bo: Pointer to the TTM buffer object.
- * Return: Pointer to the struct vmw_buffer_object embedding the
- * TTM buffer object.
- */
-static struct vmw_buffer_object *
-vmw_buffer_object(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct vmw_buffer_object, base);
-}
-
static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
- if (bo) {
+ if (bo)
ttm_bo_put(bo);
- }
}
static int vmw_gem_object_open(struct drm_gem_object *obj,
@@ -65,7 +51,7 @@ static void vmw_gem_object_close(struct drm_gem_object *obj,
static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
- struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
+ struct vmw_bo *vbo = to_vmw_bo(obj);
int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
@@ -103,6 +89,13 @@ static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
}
+static const struct vm_operations_struct vmw_vm_ops = {
+ .pfn_mkwrite = vmw_bo_vm_mkwrite,
+ .page_mkwrite = vmw_bo_vm_mkwrite,
+ .fault = vmw_bo_vm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+};
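Together with the fops change in vmwgfx_drv.c (.mmap = drm_gem_mmap), this vm_ops table is what keeps the driver's dirty-tracking fault handlers working after vmw_mmap is deleted: DRM core installs the GEM object's vm_ops on the vma before delegating to the object's mmap callback. Roughly, paraphrasing drm_gem_mmap_obj() rather than quoting the exact core code:

	vma->vm_ops = obj->funcs->vm_ops;	/* vmw_vm_ops above */
	ret = obj->funcs->mmap(obj, vma);	/* drm_gem_ttm_mmap */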
static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
@@ -115,43 +108,31 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
.mmap = drm_gem_ttm_mmap,
+ .vm_ops = &vmw_vm_ops,
};
-/**
- * vmw_gem_destroy - vmw buffer object destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-void vmw_gem_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
-
- WARN_ON(vbo->dirty);
- WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
- vmw_bo_unmap(vbo);
- drm_gem_object_release(&vbo->base.base);
- kfree(vbo);
-}
-
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
uint32_t *handle,
- struct vmw_buffer_object **p_vbo)
+ struct vmw_bo **p_vbo)
{
int ret;
-
- ret = vmw_bo_create(dev_priv, size,
- (dev_priv->has_mob) ?
- &vmw_sys_placement :
- &vmw_vram_sys_placement,
- true, false, &vmw_gem_destroy, p_vbo);
+ struct vmw_bo_params params = {
+ .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_device,
+ .size = size,
+ .pin = false
+ };
+
+ ret = vmw_bo_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
- (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+ (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
- ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+ ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
out_no_bo:
return ret;
}
@@ -165,7 +146,7 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
uint32_t handle;
int ret;
@@ -175,23 +156,23 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
goto out_no_bo;
rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put(&vbo->base.base);
+ drm_gem_object_put(&vbo->tbo.base);
out_no_bo:
return ret;
}
#if defined(CONFIG_DEBUG_FS)
-static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m)
+static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
{
const char *placement;
const char *type;
- switch (bo->base.resource->mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_SYSTEM:
placement = " CPU";
break;
@@ -212,7 +193,7 @@ static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_f
break;
}
- switch (bo->base.type) {
+ switch (bo->tbo.type) {
case ttm_bo_type_device:
type = "device";
break;
@@ -228,12 +209,12 @@ static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_f
}
seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
- id, bo->base.base.size, placement, type);
+ id, bo->tbo.base.size, placement, type);
seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
- bo->base.priority,
- bo->base.pin_count,
- kref_read(&bo->base.base.refcount),
- kref_read(&bo->base.kref));
+ bo->tbo.priority,
+ bo->tbo.pin_count,
+ kref_read(&bo->tbo.base.refcount),
+ kref_read(&bo->tbo.kref));
seq_puts(m, "\n");
}
@@ -260,14 +241,14 @@ static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
* Therefore, we need to protect this ->comm access using RCU.
*/
rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_PID);
+ task = pid_task(file->pid, PIDTYPE_TGID);
seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
task ? task->comm : "<unknown>");
rcu_read_unlock();
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, id) {
- struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj);
+ struct vmw_bo *bo = to_vmw_bo(gobj);
vmw_bo_print_info(id, bo, m);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 445d619e1fdc..5162a7a12792 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,8 +24,9 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-
#include "vmwgfx_kms.h"
+
+#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"
#include <drm/drm_atomic.h>
@@ -152,9 +153,8 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
SVGAGBCursorHeader *header;
SVGAGBAlphaCursorHeader *alpha_header;
const u32 image_size = width * height * sizeof(*image);
- bool dummy;
- header = ttm_kmap_obj_virtual(&vps->cursor.map, &dummy);
+ header = vmw_bo_map_and_cache(vps->cursor.bo);
alpha_header = &header->header.alphaHeader;
memset(header, 0, sizeof(*header));
@@ -169,7 +169,7 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
memcpy(header + 1, image, image_size);
vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
- vps->cursor.bo->resource->start);
+ vps->cursor.bo->tbo.resource->start);
}
@@ -184,13 +184,13 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
*/
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
- bool dummy;
+ bool is_iomem;
if (vps->surf) {
if (vps->surf_mapped)
- return vmw_bo_map_and_cache(vps->surf->res.backup);
+ return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
return vps->surf->snooper.image;
} else if (vps->bo)
- return ttm_kmap_obj_virtual(&vps->bo->map, &dummy);
+ return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
return NULL;
}
@@ -222,15 +222,13 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
return changed;
}
-static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo)
+static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
- if (!(*bo))
+ if (!(*vbo))
return;
- ttm_bo_unpin(*bo);
- ttm_bo_put(*bo);
- kfree(*bo);
- *bo = NULL;
+ ttm_bo_unpin(&(*vbo)->tbo);
+ vmw_bo_unreference(vbo);
}
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
@@ -254,8 +252,8 @@ static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
/* Cache is full: See if this mob is bigger than an existing mob. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i]->base.size <
- vps->cursor.bo->base.size) {
+ if (vcp->cursor_mobs[i]->tbo.base.size <
+ vps->cursor.bo->tbo.base.size) {
vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
vcp->cursor_mobs[i] = vps->cursor.bo;
vps->cursor.bo = NULL;
@@ -288,7 +286,7 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
return -EINVAL;
if (vps->cursor.bo) {
- if (vps->cursor.bo->base.size >= size)
+ if (vps->cursor.bo->tbo.base.size >= size)
return 0;
vmw_du_put_cursor_mob(vcp, vps);
}
@@ -296,26 +294,27 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
/* Look for an unused mob in the cache. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
if (vcp->cursor_mobs[i] &&
- vcp->cursor_mobs[i]->base.size >= size) {
+ vcp->cursor_mobs[i]->tbo.base.size >= size) {
vps->cursor.bo = vcp->cursor_mobs[i];
vcp->cursor_mobs[i] = NULL;
return 0;
}
}
/* Create a new mob if we can't find an existing one. */
- ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement,
- &vps->cursor.bo);
+ ret = vmw_bo_create_and_populate(dev_priv, size,
+ VMW_BO_DOMAIN_MOB,
+ &vps->cursor.bo);
if (ret != 0)
return ret;
/* Fence the mob creation so we are guaranteed to have the mob */
- ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL);
+ ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
if (ret != 0)
goto teardown;
- vmw_bo_fence_single(vps->cursor.bo, NULL);
- ttm_bo_unreserve(vps->cursor.bo);
+ vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
+ ttm_bo_unreserve(&vps->cursor.bo->tbo);
return 0;
teardown:
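Cursor MOBs now come from vmw_bo_create_and_populate(), whose updated prototype appears in the vmwgfx_drv.h hunk earlier. Usage shape, mirroring the call above:

	/* int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
	 *                                size_t bo_size, u32 domain,
	 *                                struct vmw_bo **bo_p); */
	ret = vmw_bo_create_and_populate(dev_priv, size,
					 VMW_BO_DOMAIN_MOB,
					 &vps->cursor.bo);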
@@ -363,7 +362,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
SVGA3dCopyBox *box;
unsigned box_count;
void *virtual;
- bool dummy;
+ bool is_iomem;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
@@ -423,7 +422,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
if (unlikely(ret != 0))
goto err_unreserve;
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
memcpy(srf->snooper.image, virtual,
@@ -573,39 +572,30 @@ vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
int ret;
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- struct ttm_buffer_object *bo = vps->cursor.bo;
+ struct ttm_buffer_object *bo;
- if (!bo)
+ if (!vps->cursor.bo)
return -EINVAL;
+ bo = &vps->cursor.bo->tbo;
+
if (bo->base.size < size)
return -EINVAL;
- if (vps->cursor.mapped)
+ if (vps->cursor.bo->map.virtual)
return 0;
ret = ttm_bo_reserve(bo, false, false, NULL);
-
if (unlikely(ret != 0))
return -ENOMEM;
- ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map);
-
- /*
- * We just want to try to get mob bind to finish
- * so that the first write to SVGA_REG_CURSOR_MOBID
- * is done with a buffer that the device has already
- * seen
- */
- (void) ttm_bo_wait(bo, false, false);
+ vmw_bo_map_and_cache(vps->cursor.bo);
ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return -ENOMEM;
- vps->cursor.mapped = true;
-
return 0;
}
@@ -622,19 +612,15 @@ static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
int ret = 0;
- struct ttm_buffer_object *bo = vps->cursor.bo;
-
- if (!vps->cursor.mapped)
- return 0;
+ struct vmw_bo *vbo = vps->cursor.bo;
- if (!bo)
+ if (!vbo || !vbo->map.virtual)
return 0;
- ret = ttm_bo_reserve(bo, true, false, NULL);
+ ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
if (likely(ret == 0)) {
- ttm_bo_kunmap(&vps->cursor.map);
- ttm_bo_unreserve(bo);
- vps->cursor.mapped = false;
+ vmw_bo_unmap(vbo);
+ ttm_bo_unreserve(&vbo->tbo);
}
return ret;
@@ -657,20 +643,19 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- bool dummy;
+ bool is_iomem;
if (vps->surf_mapped) {
- vmw_bo_unmap(vps->surf->res.backup);
+ vmw_bo_unmap(vps->surf->res.guest_memory_bo);
vps->surf_mapped = false;
}
- if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) {
- const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
+ if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
+ const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
if (likely(ret == 0)) {
- if (atomic_read(&vps->bo->base_mapped_count) == 0)
- ttm_bo_kunmap(&vps->bo->map);
- ttm_bo_unreserve(&vps->bo->base);
+ ttm_bo_kunmap(&vps->bo->map);
+ ttm_bo_unreserve(&vps->bo->tbo);
}
}
@@ -736,29 +721,26 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
* reserve the ttm_buffer_object first which
* vmw_bo_map_and_cache() omits.
*/
- ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
+ ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
if (unlikely(ret != 0))
return -ENOMEM;
- ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map);
-
- if (likely(ret == 0))
- atomic_inc(&vps->bo->base_mapped_count);
+ ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
- ttm_bo_unreserve(&vps->bo->base);
+ ttm_bo_unreserve(&vps->bo->tbo);
if (unlikely(ret != 0))
return -ENOMEM;
- } else if (vps->surf && !vps->bo && vps->surf->res.backup) {
+ } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
WARN_ON(vps->surf->snooper.image);
- ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false,
+ ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
NULL);
if (unlikely(ret != 0))
return -ENOMEM;
- vmw_bo_map_and_cache(vps->surf->res.backup);
- ttm_bo_unreserve(&vps->surf->res.backup->base);
+ vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
+ ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
vps->surf_mapped = true;
}
@@ -785,7 +767,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
s32 hotspot_x, hotspot_y;
- bool dummy;
hotspot_x = du->hotspot_x;
hotspot_y = du->hotspot_y;
@@ -827,11 +808,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
- if (vps->bo) {
- if (ttm_kmap_obj_virtual(&vps->bo->map, &dummy))
- atomic_dec(&vps->bo->base_mapped_count);
- }
-
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
@@ -935,7 +911,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
WARN_ON(!surface);
if (!surface ||
- (!surface->snooper.image && !surface->res.backup)) {
+ (!surface->snooper.image && !surface->res.guest_memory_bo)) {
DRM_ERROR("surface not suitable for cursor\n");
return -EINVAL;
}
@@ -1279,9 +1255,9 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
user_fence_rep, vclips, num_clips,
NULL);
case vmw_du_screen_target:
- return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
- user_fence_rep, NULL, vclips, num_clips,
- 1, false, true, NULL);
+ return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
+ user_fence_rep, NULL, vclips, num_clips,
+ 1, NULL);
default:
WARN_ONCE(true,
"Readback called with invalid display system.\n");
@@ -1406,7 +1382,7 @@ static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(fb);
- return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
+ return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}
static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
@@ -1486,69 +1462,6 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.dirty = vmw_framebuffer_bo_dirty_ext,
};
-/*
- * Pin the bofer in a location suitable for access by the
- * display system.
- */
-static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
-{
- struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_buffer_object *buf;
- struct ttm_placement *placement;
- int ret;
-
- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
-
- if (!buf)
- return 0;
-
- switch (dev_priv->active_display_unit) {
- case vmw_du_legacy:
- vmw_overlay_pause_all(dev_priv);
- ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
- vmw_overlay_resume_all(dev_priv);
- break;
- case vmw_du_screen_object:
- case vmw_du_screen_target:
- if (vfb->bo) {
- if (dev_priv->capabilities & SVGA_CAP_3D) {
- /*
- * Use surface DMA to get content to
- * sreen target surface.
- */
- placement = &vmw_vram_gmr_placement;
- } else {
- /* Use CPU blit. */
- placement = &vmw_sys_placement;
- }
- } else {
- /* Use surface / image update */
- placement = &vmw_mob_placement;
- }
-
- return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
-{
- struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_buffer_object *buf;
-
- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
-
- if (WARN_ON(!buf))
- return 0;
-
- return vmw_bo_unpin(dev_priv, buf, false);
-}
-
/**
* vmw_create_bo_proxy - create a proxy surface for the buffer object
*
@@ -1566,7 +1479,7 @@ static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
*/
static int vmw_create_bo_proxy(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct vmw_buffer_object *bo_mob,
+ struct vmw_bo *bo_mob,
struct vmw_surface **srf_out)
{
struct vmw_surface_metadata metadata = {0};
@@ -1618,9 +1531,9 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
/* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true);
- vmw_bo_unreference(&res->backup);
- res->backup = vmw_bo_reference(bo_mob);
- res->backup_offset = 0;
+ vmw_bo_unreference(&res->guest_memory_bo);
+ res->guest_memory_bo = vmw_bo_reference(bo_mob);
+ res->guest_memory_offset = 0;
vmw_resource_unreserve(res, false, false, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1630,7 +1543,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
- struct vmw_buffer_object *bo,
+ struct vmw_bo *bo,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -1642,7 +1555,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
- if (unlikely(requested_size > bo->base.base.size)) {
+ if (unlikely(requested_size > bo->tbo.base.size)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
@@ -1663,7 +1576,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
goto out_err1;
}
- vfbd->base.base.obj[0] = &bo->base.base;
+ vfbd->base.base.obj[0] = &bo->tbo.base;
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
@@ -1718,7 +1631,7 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_buffer_object *bo,
+ struct vmw_bo *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -1765,9 +1678,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
if (ret)
return ERR_PTR(ret);
- vfb->pin = vmw_framebuffer_pin;
- vfb->unpin = vmw_framebuffer_unpin;
-
return vfb;
}
@@ -1782,7 +1692,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
- struct vmw_buffer_object *bo = NULL;
+ struct vmw_bo *bo = NULL;
int ret;
/* returns either a bo or surface */
@@ -1817,7 +1727,7 @@ err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo) {
vmw_bo_unreference(&bo);
- drm_gem_object_put(&bo->base.base);
+ drm_gem_object_put(&bo->tbo.base);
}
if (surface)
vmw_surface_unreference(&surface);
@@ -2202,7 +2112,6 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.max_width = dev_priv->texture_max_width;
dev->mode_config.max_height = dev_priv->texture_max_height;
dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
- dev->mode_config.prefer_shadow_fbdev = !dev_priv->has_mob;
drm_mode_create_suggested_offset_properties(dev);
vmw_kms_create_hotplug_mode_update_property(dev_priv);
@@ -3076,8 +2985,20 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
struct vmw_framebuffer_bo *vfbbo =
container_of(update->vfb, typeof(*vfbbo), base);
- ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
- update->cpu_blit);
+ /*
+ * For screen targets we want a mappable bo; for everything else we want
+ * an accelerated, i.e. host-backed (vram or gmr), bo. If the display
+ * unit is not a screen target then mobs shouldn't be available.
+ */
+ if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
+ vmw_bo_placement_set(vfbbo->buffer,
+ VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
+ VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
+ } else {
+ WARN_ON(update->dev_priv->has_mob);
+ vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
+ }
+ ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
} else {
struct vmw_framebuffer_surface *vfbs =
container_of(update->vfb, typeof(*vfbs), base);
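Editorial aside: the hunk above replaces the old pin/cpu_blit plumbing with a per-display-unit placement choice. A minimal sketch of that decision in isolation, assuming the VMW_BO_DOMAIN_* flags and the two placement helpers this series introduces (the helper name below is hypothetical):

/* Hypothetical helper condensing the placement choice made in
 * vmw_du_helper_plane_update() above. */
static void set_fb_bo_placement(struct vmw_private *vmw, struct vmw_bo *buf)
{
	if (vmw->active_display_unit == vmw_du_screen_target)
		vmw_bo_placement_set(buf,
				     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB |
				     VMW_BO_DOMAIN_GMR,
				     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB |
				     VMW_BO_DOMAIN_GMR);
	else
		vmw_bo_placement_set_default_accelerated(buf);
}

This mirrors the comment in the hunk: screen targets want a bo the host can map, everything else a host-backed one.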
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 4d6e7b555db7..3de7b4b6a230 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -126,7 +126,6 @@ struct vmw_du_update_plane {
struct vmw_framebuffer *vfb;
struct vmw_fence_obj **out_fence;
struct mutex *mutex;
- bool cpu_blit;
bool intr;
};
@@ -217,8 +216,6 @@ struct vmw_kms_dirty {
*/
struct vmw_framebuffer {
struct drm_framebuffer base;
- int (*pin)(struct vmw_framebuffer *fb);
- int (*unpin)(struct vmw_framebuffer *fb);
bool bo;
uint32_t user_handle;
};
@@ -233,7 +230,7 @@ struct vmw_clip_rect {
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
- struct vmw_buffer_object *buffer;
+ struct vmw_bo *buffer;
struct list_head head;
bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
};
@@ -241,7 +238,7 @@ struct vmw_framebuffer_surface {
struct vmw_framebuffer_bo {
struct vmw_framebuffer base;
- struct vmw_buffer_object *buffer;
+ struct vmw_bo *buffer;
};
@@ -273,9 +270,7 @@ struct vmw_crtc_state {
};
struct vmw_cursor_plane_state {
- struct ttm_buffer_object *bo;
- struct ttm_bo_kmap_obj map;
- bool mapped;
+ struct vmw_bo *bo;
s32 hotspot_x;
s32 hotspot_y;
};
@@ -293,7 +288,7 @@ struct vmw_cursor_plane_state {
struct vmw_plane_state {
struct drm_plane_state base;
struct vmw_surface *surf;
- struct vmw_buffer_object *bo;
+ struct vmw_bo *bo;
int content_fb_type;
unsigned long bo_size;
@@ -346,7 +341,7 @@ struct vmw_connector_state {
struct vmw_cursor_plane {
struct drm_plane base;
- struct ttm_buffer_object *cursor_mobs[3];
+ struct vmw_bo *cursor_mobs[3];
};
/**
@@ -364,7 +359,7 @@ struct vmw_display_unit {
struct vmw_cursor_plane cursor;
struct vmw_surface *cursor_surface;
- struct vmw_buffer_object *cursor_bo;
+ struct vmw_bo *cursor_bo;
size_t cursor_age;
int cursor_x;
@@ -397,7 +392,7 @@ struct vmw_display_unit {
struct vmw_validation_ctx {
struct vmw_resource *res;
- struct vmw_buffer_object *buf;
+ struct vmw_bo *buf;
};
#define vmw_crtc_to_du(x) \
@@ -458,7 +453,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
uint32_t num_clips);
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_buffer_object *bo,
+ struct vmw_bo *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -566,17 +561,15 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc);
-int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_clip_rect *clips,
- struct drm_vmw_rect *vclips,
- uint32_t num_clips,
- int increment,
- bool to_surface,
- bool interruptible,
- struct drm_crtc *crtc);
+int vmw_kms_stdu_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ struct drm_crtc *crtc);
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a56e5d0ca3c6..c0e42f2ed144 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,11 +25,13 @@
*
**************************************************************************/
+#include "vmwgfx_bo.h"
+#include "vmwgfx_kms.h"
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include "vmwgfx_kms.h"
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
@@ -134,6 +136,47 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
return 0;
}
+/*
+ * Pin the buffer in a location suitable for access by the
+ * display system.
+ */
+static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_bo *buf;
+ int ret;
+
+ buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+
+ if (!buf)
+ return 0;
+ WARN_ON(dev_priv->active_display_unit != vmw_du_legacy);
+
+ if (dev_priv->active_display_unit == vmw_du_legacy) {
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
+ vmw_overlay_resume_all(dev_priv);
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_bo *buf;
+
+ buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+
+ if (WARN_ON(!buf))
+ return 0;
+
+ return vmw_bo_unpin(dev_priv, buf, false);
+}
+
static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *ldu)
{
@@ -145,8 +188,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
list_del_init(&ldu->active);
if (--(ld->num_active) == 0) {
BUG_ON(!ld->fb);
- if (ld->fb->unpin)
- ld->fb->unpin(ld->fb);
+ WARN_ON(vmw_ldu_fb_unpin(ld->fb));
ld->fb = NULL;
}
@@ -163,11 +205,10 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
BUG_ON(!ld->num_active && ld->fb);
if (vfb != ld->fb) {
- if (ld->fb && ld->fb->unpin)
- ld->fb->unpin(ld->fb);
+ if (ld->fb)
+ WARN_ON(vmw_ldu_fb_unpin(ld->fb));
vmw_svga_enable(vmw_priv);
- if (vfb->pin)
- vfb->pin(vfb);
+ WARN_ON(vmw_ldu_fb_pin(vfb));
ld->fb = vfb;
}
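Editorial aside: with the per-framebuffer pin/unpin hooks removed, the LDU keeps the semantics inline: pin when the first unit becomes active, unpin when the last one goes away. A compilable, driver-agnostic C model of that refcounting (all names illustrative, not driver API):

#include <assert.h>
#include <stddef.h>

struct display_state {
	int num_active;   /* active display units */
	void *fb;         /* currently pinned framebuffer, if any */
};

/* Model of vmw_ldu_add_active(): swap (and re-pin) only when the
 * framebuffer actually changes. */
static void add_active(struct display_state *st, void *fb,
		       int (*pin)(void *), int (*unpin)(void *))
{
	assert(st->num_active || !st->fb);
	if (fb != st->fb) {
		if (st->fb)
			unpin(st->fb);
		pin(fb);
		st->fb = fb;
	}
	st->num_active++;
}

/* Model of vmw_ldu_del_active(): the last unit out unpins the fb. */
static void del_active(struct display_state *st, int (*unpin)(void *))
{
	if (--st->num_active == 0) {
		assert(st->fb);
		unpin(st->fb);
		st->fb = NULL;
	}
}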
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 0a8cc28d6606..7055cbefc768 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2012-2021 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2012-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,10 +25,11 @@
*
**************************************************************************/
-#include <linux/highmem.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
+#include <linux/highmem.h>
+
#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT64_0
@@ -50,7 +51,7 @@
* @pt_root_page DMA address of the level 0 page of the page table.
*/
struct vmw_mob {
- struct ttm_buffer_object *pt_bo;
+ struct vmw_bo *pt_bo;
unsigned long num_pages;
unsigned pt_level;
dma_addr_t pt_root_page;
@@ -203,7 +204,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
if (otable->page_table == NULL)
return;
- bo = otable->page_table->pt_bo;
+ bo = &otable->page_table->pt_bo->tbo;
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return;
@@ -251,7 +252,9 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
bo_size += otables[i].size;
}
- ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo);
+ ret = vmw_bo_create_and_populate(dev_priv, bo_size,
+ VMW_BO_DOMAIN_WAITABLE_SYS,
+ &batch->otable_bo);
if (unlikely(ret != 0))
return ret;
@@ -260,7 +263,8 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
if (!batch->otables[i].enabled)
continue;
- ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
+ ret = vmw_setup_otable_base(dev_priv, i,
+ &batch->otable_bo->tbo,
offset,
&otables[i]);
if (unlikely(ret != 0))
@@ -277,8 +281,8 @@ out_no_setup:
&batch->otables[i]);
}
- vmw_bo_unpin_unlocked(batch->otable_bo);
- ttm_bo_put(batch->otable_bo);
+ vmw_bo_unpin_unlocked(&batch->otable_bo->tbo);
+ ttm_bo_put(&batch->otable_bo->tbo);
batch->otable_bo = NULL;
return ret;
}
@@ -329,7 +333,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
struct vmw_otable_batch *batch)
{
SVGAOTableType i;
- struct ttm_buffer_object *bo = batch->otable_bo;
+ struct ttm_buffer_object *bo = &batch->otable_bo->tbo;
int ret;
for (i = 0; i < batch->num_otables; ++i)
@@ -344,8 +348,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);
- ttm_bo_put(batch->otable_bo);
- batch->otable_bo = NULL;
+ vmw_bo_unreference(&batch->otable_bo);
}
/*
@@ -413,7 +416,9 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
{
BUG_ON(mob->pt_bo != NULL);
- return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE, &mob->pt_bo);
+ return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE,
+ VMW_BO_DOMAIN_WAITABLE_SYS,
+ &mob->pt_bo);
}
/**
@@ -494,7 +499,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
unsigned long num_data_pages)
{
unsigned long num_pt_pages = 0;
- struct ttm_buffer_object *bo = mob->pt_bo;
+ struct ttm_buffer_object *bo = &mob->pt_bo->tbo;
struct vmw_piter save_pt_iter = {0};
struct vmw_piter pt_iter;
const struct vmw_sg_table *vsgt;
@@ -531,9 +536,8 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
void vmw_mob_destroy(struct vmw_mob *mob)
{
if (mob->pt_bo) {
- vmw_bo_unpin_unlocked(mob->pt_bo);
- ttm_bo_put(mob->pt_bo);
- mob->pt_bo = NULL;
+ vmw_bo_unpin_unlocked(&mob->pt_bo->tbo);
+ vmw_bo_unreference(&mob->pt_bo);
}
kfree(mob);
}
@@ -552,7 +556,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
SVGA3dCmdDestroyGBMob body;
} *cmd;
int ret;
- struct ttm_buffer_object *bo = mob->pt_bo;
+ struct ttm_buffer_object *bo = &mob->pt_bo->tbo;
if (bo) {
ret = ttm_bo_reserve(bo, false, true, NULL);
@@ -644,9 +648,8 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
out_no_cmd_space:
vmw_fifo_resource_dec(dev_priv);
if (pt_set_up) {
- vmw_bo_unpin_unlocked(mob->pt_bo);
- ttm_bo_put(mob->pt_bo);
- mob->pt_bo = NULL;
+ vmw_bo_unpin_unlocked(&mob->pt_bo->tbo);
+ vmw_bo_unreference(&mob->pt_bo);
}
return -ENOMEM;
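Editorial aside: the pattern running through this file is that containers now hold a struct vmw_bo; code needing the raw TTM object takes &bo->tbo, and teardown drops the wrapper reference rather than calling ttm_bo_put() and NULL-ing by hand. A condensed sketch of the idiom (function name illustrative; assumes vmw_bo_unreference() clears the pointer, as the vmwgfx helper traditionally does):

/* Illustrative teardown matching the vmw_mob_destroy() and
 * vmw_mob_bind() error paths above. */
static void pt_bo_teardown(struct vmw_bo **pt_bo)
{
	if (*pt_bo) {
		vmw_bo_unpin_unlocked(&(*pt_bo)->tbo);
		vmw_bo_unreference(pt_bo);
	}
}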
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index b5b311f2a91a..8d171d71cb8a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,19 +24,19 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-
-#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"
-#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_placement.h>
#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream {
- struct vmw_buffer_object *buf;
+ struct vmw_bo *buf;
bool claimed;
bool paused;
struct drm_vmw_control_stream_arg saved;
@@ -92,7 +92,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
@@ -140,7 +140,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
for (i = 0; i < num_items; i++)
items[i].registerId = i;
- vmw_bo_get_guest_ptr(&buf->base, &ptr);
+ vmw_bo_get_guest_ptr(&buf->tbo, &ptr);
ptr.offset += arg->offset;
items[SVGA_VIDEO_ENABLED].value = true;
@@ -223,7 +223,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
* used with GMRs instead of being locked to vram.
*/
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
bool pin, bool inter)
{
if (!pin)
@@ -295,7 +295,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted.
*/
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
@@ -433,7 +433,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct drm_vmw_control_stream_arg *arg =
(struct drm_vmw_control_stream_arg *)data;
- struct vmw_buffer_object *buf;
+ struct vmw_bo *buf;
struct vmw_resource *res;
int ret;
@@ -458,7 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_bo_unreference(&buf);
- drm_gem_object_put(&buf->base.base);
+ drm_gem_object_put(&buf->tbo.base);
out_unlock:
mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index f41f041559f4..74ff2812d66a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2019 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,6 +24,7 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
/*
@@ -78,11 +79,11 @@ struct vmw_bo_dirty {
* dirty structure with the results. This function may change the
* dirty-tracking method.
*/
-static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
+static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
- struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
+ struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
pgoff_t num_marked;
num_marked = clean_record_shared_mapping_range
@@ -116,26 +117,25 @@ static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
*
* This function may change the dirty-tracking method.
*/
-static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
+static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
- struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
+ struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
pgoff_t num_marked;
if (dirty->end <= dirty->start)
return;
- num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping,
- dirty->start + offset,
- dirty->end - dirty->start);
+ num_marked = wp_shared_mapping_range(vbo->tbo.bdev->dev_mapping,
+ dirty->start + offset,
+ dirty->end - dirty->start);
if (100UL * num_marked / dirty->bitmap_size >
- VMW_DIRTY_PERCENTAGE) {
+ VMW_DIRTY_PERCENTAGE)
dirty->change_count++;
- } else {
+ else
dirty->change_count = 0;
- }
if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
pgoff_t start = 0;
@@ -160,7 +160,7 @@ static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
*
* This function may change the dirty tracking method.
*/
-void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
+void vmw_bo_dirty_scan(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
@@ -181,12 +181,12 @@ void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
* when calling unmap_mapping_range(). This function makes sure we pick
* up all dirty pages.
*/
-static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
+static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
- struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
+ struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
return;
@@ -206,11 +206,11 @@ static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
*
* This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
*/
-void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end)
{
- unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
- struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
+ struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
vmw_bo_dirty_pre_unmap(vbo, start, end);
unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
@@ -227,10 +227,10 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
*
* Return: Zero on success, -ENOMEM on memory allocation failure.
*/
-int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
+int vmw_bo_dirty_add(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t num_pages = PFN_UP(vbo->base.resource->size);
+ pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size);
size_t size;
int ret;
@@ -253,8 +253,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
dirty->method = VMW_BO_DIRTY_PAGETABLE;
} else {
- struct address_space *mapping = vbo->base.bdev->dev_mapping;
- pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
+ pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
dirty->method = VMW_BO_DIRTY_MKWRITE;
@@ -284,7 +284,7 @@ out_no_dirty:
*
* Return: Zero on success, -ENOMEM on memory allocation failure.
*/
-void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
+void vmw_bo_dirty_release(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
@@ -306,11 +306,11 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
*/
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
- struct vmw_buffer_object *vbo = res->backup;
+ struct vmw_bo *vbo = res->guest_memory_bo;
struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t start, cur, end;
- unsigned long res_start = res->backup_offset;
- unsigned long res_end = res->backup_offset + res->backup_size;
+ unsigned long res_start = res->guest_memory_offset;
+ unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;
WARN_ON_ONCE(res_start & ~PAGE_MASK);
res_start >>= PAGE_SHIFT;
@@ -351,9 +351,9 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
*/
void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{
- unsigned long res_start = res->backup_offset;
- unsigned long res_end = res->backup_offset + res->backup_size;
- struct vmw_buffer_object *vbo = res->backup;
+ unsigned long res_start = res->guest_memory_offset;
+ unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;
+ struct vmw_bo *vbo = res->guest_memory_bo;
struct vmw_bo_dirty *dirty = vbo->dirty;
res_start >>= PAGE_SHIFT;
@@ -380,8 +380,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
vm_fault_t ret;
unsigned long page_offset;
unsigned int save_flags;
- struct vmw_buffer_object *vbo =
- container_of(bo, typeof(*vbo), base);
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
/*
* mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
@@ -419,8 +418,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
- struct vmw_buffer_object *vbo =
- container_of(bo, struct vmw_buffer_object, base);
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
pgoff_t num_prefault;
pgprot_t prot;
vm_fault_t ret;
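Editorial aside: the mkwrite-scan hunk above implements a small hysteresis for switching dirty-tracking methods. A compilable, self-contained C model of the heuristic; the two threshold values are stand-ins, not the driver's actual constants:

#include <stdbool.h>

#define DIRTY_PERCENTAGE 10       /* stand-in for VMW_DIRTY_PERCENTAGE */
#define NUM_CHANGE_TRIGGERS 100   /* stand-in for VMW_DIRTY_NUM_CHANGE_TRIGGERS */

struct dirty_tracker {
	unsigned long bitmap_size;  /* pages tracked */
	unsigned int change_count;
	bool use_pagetable;         /* PAGETABLE vs MKWRITE method */
};

/* Model of vmw_bo_dirty_scan_mkwrite(): if a large share of the mapping
 * keeps being re-dirtied scan after scan, per-page write faults
 * presumably cost more than a page-table walk, so flip methods. */
static void scan_mkwrite(struct dirty_tracker *t, unsigned long num_marked)
{
	if (100UL * num_marked / t->bitmap_size > DIRTY_PERCENTAGE)
		t->change_count++;
	else
		t->change_count = 0;

	if (t->change_count > NUM_CHANGE_TRIGGERS) {
		t->use_pagetable = true;
		t->change_count = 0;
	}
}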
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c7d645e5ec7b..71eeabf001c8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,9 +27,10 @@
#include <drm/ttm/ttm_placement.h>
-#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
#define VMW_RES_EVICT_ERR_COUNT 10
@@ -39,10 +40,10 @@
*/
void vmw_resource_mob_attach(struct vmw_resource *res)
{
- struct vmw_buffer_object *backup = res->backup;
- struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
+ struct vmw_bo *gbo = res->guest_memory_bo;
+ struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;
- dma_resv_assert_held(res->backup->base.base.resv);
+ dma_resv_assert_held(gbo->tbo.base.resv);
res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
res->func->prio;
@@ -51,14 +52,14 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
container_of(*new, struct vmw_resource, mob_node);
parent = *new;
- new = (res->backup_offset < this->backup_offset) ?
+ new = (res->guest_memory_offset < this->guest_memory_offset) ?
&((*new)->rb_left) : &((*new)->rb_right);
}
rb_link_node(&res->mob_node, parent, new);
- rb_insert_color(&res->mob_node, &backup->res_tree);
+ rb_insert_color(&res->mob_node, &gbo->res_tree);
- vmw_bo_prio_add(backup, res->used_prio);
+ vmw_bo_prio_add(gbo, res->used_prio);
}
/**
@@ -67,13 +68,13 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
*/
void vmw_resource_mob_detach(struct vmw_resource *res)
{
- struct vmw_buffer_object *backup = res->backup;
+ struct vmw_bo *gbo = res->guest_memory_bo;
- dma_resv_assert_held(backup->base.base.resv);
+ dma_resv_assert_held(gbo->tbo.base.resv);
if (vmw_resource_mob_attached(res)) {
- rb_erase(&res->mob_node, &backup->res_tree);
+ rb_erase(&res->mob_node, &gbo->res_tree);
RB_CLEAR_NODE(&res->mob_node);
- vmw_bo_prio_del(backup, res->used_prio);
+ vmw_bo_prio_del(gbo, res->used_prio);
}
}
@@ -120,8 +121,8 @@ static void vmw_resource_release(struct kref *kref)
spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
spin_unlock(&dev_priv->resource_lock);
- if (res->backup) {
- struct ttm_buffer_object *bo = &res->backup->base;
+ if (res->guest_memory_bo) {
+ struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
ret = ttm_bo_reserve(bo, false, false, NULL);
BUG_ON(ret);
@@ -133,14 +134,14 @@ static void vmw_resource_release(struct kref *kref)
val_buf.num_shared = 0;
res->func->unbind(res, false, &val_buf);
}
- res->backup_dirty = false;
+ res->guest_memory_dirty = false;
vmw_resource_mob_detach(res);
if (res->dirty)
res->func->dirty_free(res);
if (res->coherent)
- vmw_bo_dirty_release(res->backup);
+ vmw_bo_dirty_release(res->guest_memory_bo);
ttm_bo_unreserve(bo);
- vmw_bo_unreference(&res->backup);
+ vmw_bo_unreference(&res->guest_memory_bo);
}
if (likely(res->hw_destroy != NULL)) {
@@ -223,9 +224,9 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
INIT_LIST_HEAD(&res->lru_head);
INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
- res->backup = NULL;
- res->backup_offset = 0;
- res->backup_dirty = false;
+ res->guest_memory_bo = NULL;
+ res->guest_memory_offset = 0;
+ res->guest_memory_dirty = false;
res->res_dirty = false;
res->coherent = false;
res->used_prio = 3;
@@ -263,7 +264,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
int ret = -EINVAL;
base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL))
+ if (unlikely(!base))
return -EINVAL;
if (unlikely(ttm_base_object_type(base) != converter->object_type))
@@ -290,7 +291,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
- struct vmw_buffer_object **out_buf)
+ struct vmw_bo **out_buf)
{
struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res;
@@ -312,32 +313,36 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}
/**
- * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
*
- * @res: The resource for which to allocate a backup buffer.
+ * @res: The resource for which to allocate a guest memory buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
*/
static int vmw_resource_buf_alloc(struct vmw_resource *res,
bool interruptible)
{
- unsigned long size = PFN_ALIGN(res->backup_size);
- struct vmw_buffer_object *backup;
+ unsigned long size = PFN_ALIGN(res->guest_memory_size);
+ struct vmw_bo *gbo;
+ struct vmw_bo_params bo_params = {
+ .domain = res->func->domain,
+ .busy_domain = res->func->busy_domain,
+ .bo_type = ttm_bo_type_device,
+ .size = res->guest_memory_size,
+ .pin = false
+ };
int ret;
- if (likely(res->backup)) {
- BUG_ON(res->backup->base.base.size < size);
+ if (likely(res->guest_memory_bo)) {
+ BUG_ON(res->guest_memory_bo->tbo.base.size < size);
return 0;
}
- ret = vmw_bo_create(res->dev_priv, res->backup_size,
- res->func->backup_placement,
- interruptible, false,
- &vmw_bo_bo_free, &backup);
+ ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
- res->backup = backup;
+ res->guest_memory_bo = gbo;
out_no_bo:
return ret;
@@ -369,13 +374,13 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
}
if (func->bind &&
- ((func->needs_backup && !vmw_resource_mob_attached(res) &&
- val_buf->bo != NULL) ||
- (!func->needs_backup && val_buf->bo != NULL))) {
+ ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
+ val_buf->bo) ||
+ (!func->needs_guest_memory && val_buf->bo))) {
ret = func->bind(res, val_buf);
if (unlikely(ret != 0))
goto out_bind_failed;
- if (func->needs_backup)
+ if (func->needs_guest_memory)
vmw_resource_mob_attach(res);
}
@@ -385,11 +390,11 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
*/
if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
!res->coherent) {
- if (res->backup->dirty && !res->dirty) {
+ if (res->guest_memory_bo->dirty && !res->dirty) {
ret = func->dirty_alloc(res);
if (ret)
return ret;
- } else if (!res->backup->dirty && res->dirty) {
+ } else if (!res->guest_memory_bo->dirty && res->dirty) {
func->dirty_free(res);
}
}
@@ -400,12 +405,12 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
*/
if (res->dirty) {
if (dirtying && !res->res_dirty) {
- pgoff_t start = res->backup_offset >> PAGE_SHIFT;
+ pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
pgoff_t end = __KERNEL_DIV_ROUND_UP
- (res->backup_offset + res->backup_size,
+ (res->guest_memory_offset + res->guest_memory_size,
PAGE_SIZE);
- vmw_bo_dirty_unmap(res->backup, start, end);
+ vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
}
vmw_bo_dirty_transfer_to_res(res);
@@ -427,10 +432,10 @@ out_bind_failed:
* @res: Pointer to the struct vmw_resource to unreserve.
* @dirty_set: Change dirty status of the resource.
* @dirty: When changing dirty status indicates the new status.
- * @switch_backup: Backup buffer has been switched.
- * @new_backup: Pointer to new backup buffer if command submission
+ * @switch_guest_memory: Guest memory buffer has been switched.
+ * @new_guest_memory_bo: Pointer to new guest memory buffer if command submission
* switched. May be NULL.
- * @new_backup_offset: New backup offset if @switch_backup is true.
+ * @new_guest_memory_offset: New guest memory offset if @switch_guest_memory is true.
*
* Currently unreserving a resource means putting it back on the device's
* resource lru list, so that it can be evicted if necessary.
@@ -438,42 +443,42 @@ out_bind_failed:
void vmw_resource_unreserve(struct vmw_resource *res,
bool dirty_set,
bool dirty,
- bool switch_backup,
- struct vmw_buffer_object *new_backup,
- unsigned long new_backup_offset)
+ bool switch_guest_memory,
+ struct vmw_bo *new_guest_memory_bo,
+ unsigned long new_guest_memory_offset)
{
struct vmw_private *dev_priv = res->dev_priv;
if (!list_empty(&res->lru_head))
return;
- if (switch_backup && new_backup != res->backup) {
- if (res->backup) {
+ if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
+ if (res->guest_memory_bo) {
vmw_resource_mob_detach(res);
if (res->coherent)
- vmw_bo_dirty_release(res->backup);
- vmw_bo_unreference(&res->backup);
+ vmw_bo_dirty_release(res->guest_memory_bo);
+ vmw_bo_unreference(&res->guest_memory_bo);
}
- if (new_backup) {
- res->backup = vmw_bo_reference(new_backup);
+ if (new_guest_memory_bo) {
+ res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
/*
* The validation code should already have added a
* dirty tracker here.
*/
- WARN_ON(res->coherent && !new_backup->dirty);
+ WARN_ON(res->coherent && !new_guest_memory_bo->dirty);
vmw_resource_mob_attach(res);
} else {
- res->backup = NULL;
+ res->guest_memory_bo = NULL;
}
- } else if (switch_backup && res->coherent) {
- vmw_bo_dirty_release(res->backup);
+ } else if (switch_guest_memory && res->coherent) {
+ vmw_bo_dirty_release(res->guest_memory_bo);
}
- if (switch_backup)
- res->backup_offset = new_backup_offset;
+ if (switch_guest_memory)
+ res->guest_memory_offset = new_guest_memory_offset;
if (dirty_set)
res->res_dirty = dirty;
@@ -507,30 +512,32 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
{
struct ttm_operation_ctx ctx = { true, false };
struct list_head val_list;
- bool backup_dirty = false;
+ bool guest_memory_dirty = false;
int ret;
- if (unlikely(res->backup == NULL)) {
+ if (unlikely(!res->guest_memory_bo)) {
ret = vmw_resource_buf_alloc(res, interruptible);
if (unlikely(ret != 0))
return ret;
}
INIT_LIST_HEAD(&val_list);
- ttm_bo_get(&res->backup->base);
- val_buf->bo = &res->backup->base;
+ ttm_bo_get(&res->guest_memory_bo->tbo);
+ val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
- if (res->func->needs_backup && !vmw_resource_mob_attached(res))
+ if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
return 0;
- backup_dirty = res->backup_dirty;
- ret = ttm_bo_validate(&res->backup->base,
- res->func->backup_placement,
+ guest_memory_dirty = res->guest_memory_dirty;
+ vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
+ res->func->busy_domain);
+ ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
+ &res->guest_memory_bo->placement,
&ctx);
if (unlikely(ret != 0))
@@ -543,8 +550,8 @@ out_no_validate:
out_no_reserve:
ttm_bo_put(val_buf->bo);
val_buf->bo = NULL;
- if (backup_dirty)
- vmw_bo_unreference(&res->backup);
+ if (guest_memory_dirty)
+ vmw_bo_unreference(&res->guest_memory_bo);
return ret;
}
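Editorial aside: this hunk and the later vmw_resource_pin() hunk both replace the static res->func->backup_placement with the same two-step: program the bo's placement from the resource's domains, then validate against the bo's own placement. In isolation, as a sketch (error handling trimmed; assumes the driver's headers):

/* Sketch of the recurring placement-then-validate step introduced in
 * vmw_resource_check_buffer() and vmw_resource_pin(). */
static int validate_in_domains(struct vmw_resource *res,
			       struct ttm_operation_ctx *ctx)
{
	struct vmw_bo *gbo = res->guest_memory_bo;

	vmw_bo_placement_set(gbo, res->func->domain,
			     res->func->busy_domain);
	return ttm_bo_validate(&gbo->tbo, &gbo->placement, ctx);
}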
@@ -555,12 +562,13 @@ out_no_reserve:
* @res: The resource to reserve.
*
* This function takes the resource off the LRU list and make sure
- * a backup buffer is present for guest-backed resources. However,
- * the buffer may not be bound to the resource at this point.
+ * a guest memory buffer is present for guest-backed resources.
+ * However, the buffer may not be bound to the resource at this
+ * point.
*
*/
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
- bool no_backup)
+ bool no_guest_memory)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
@@ -569,13 +577,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
list_del_init(&res->lru_head);
spin_unlock(&dev_priv->resource_lock);
- if (res->func->needs_backup && res->backup == NULL &&
- !no_backup) {
+ if (res->func->needs_guest_memory && !res->guest_memory_bo &&
+ !no_guest_memory) {
ret = vmw_resource_buf_alloc(res, interruptible);
if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a backup buffer "
+ DRM_ERROR("Failed to allocate a guest memory buffer "
"of size %lu. bytes\n",
- (unsigned long) res->backup_size);
+ (unsigned long) res->guest_memory_size);
return ret;
}
}
@@ -585,10 +593,10 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
/**
* vmw_resource_backoff_reservation - Unreserve and unreference a
- * backup buffer
+ * guest memory buffer
*.
* @ticket: The ww acquire ctx used for reservation.
- * @val_buf: Backup buffer information.
+ * @val_buf: Guest memory buffer information.
*/
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
@@ -630,14 +638,14 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
return ret;
if (unlikely(func->unbind != NULL &&
- (!func->needs_backup || vmw_resource_mob_attached(res)))) {
+ (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
ret = func->unbind(res, res->res_dirty, &val_buf);
if (unlikely(ret != 0))
goto out_no_unbind;
vmw_resource_mob_detach(res);
}
ret = func->destroy(res);
- res->backup_dirty = true;
+ res->guest_memory_dirty = true;
res->res_dirty = false;
out_no_unbind:
vmw_resource_backoff_reservation(ticket, &val_buf);
@@ -676,8 +684,8 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
val_buf.bo = NULL;
val_buf.num_shared = 0;
- if (res->backup)
- val_buf.bo = &res->backup->base;
+ if (res->guest_memory_bo)
+ val_buf.bo = &res->guest_memory_bo->tbo;
do {
ret = vmw_resource_do_validate(res, &val_buf, dirtying);
if (likely(ret != -EBUSY))
@@ -717,9 +725,9 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
if (unlikely(ret != 0))
goto out_no_validate;
- else if (!res->func->needs_backup && res->backup) {
+ else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
WARN_ON_ONCE(vmw_resource_mob_attached(res));
- vmw_bo_unreference(&res->backup);
+ vmw_bo_unreference(&res->guest_memory_bo);
}
return 0;
@@ -740,14 +748,14 @@ out_no_validate:
* validation code, since resource validation and eviction
* both require the backup buffer to be reserved.
*/
-void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
+void vmw_resource_unbind_list(struct vmw_bo *vbo)
{
struct ttm_validate_buffer val_buf = {
- .bo = &vbo->base,
+ .bo = &vbo->tbo,
.num_shared = 0
};
- dma_resv_assert_held(vbo->base.base.resv);
+ dma_resv_assert_held(vbo->tbo.base.resv);
while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
struct rb_node *node = vbo->res_tree.rb_node;
struct vmw_resource *res =
@@ -756,12 +764,12 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
if (!WARN_ON_ONCE(!res->func->unbind))
(void) res->func->unbind(res, res->res_dirty, &val_buf);
- res->backup_dirty = true;
+ res->guest_memory_dirty = true;
res->res_dirty = false;
vmw_resource_mob_detach(res);
}
- (void) ttm_bo_wait(&vbo->base, false, false);
+ (void) ttm_bo_wait(&vbo->tbo, false, false);
}
@@ -773,7 +781,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
* Read back cached states from the device if they exist. This function
* assumes binding_mutex is held.
*/
-int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
+int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
{
struct vmw_resource *dx_query_ctx;
struct vmw_private *dev_priv;
@@ -822,20 +830,19 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *old_mem,
struct ttm_resource *new_mem)
{
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
struct ttm_device *bdev = bo->bdev;
- struct vmw_private *dev_priv;
-
- dev_priv = container_of(bdev, struct vmw_private, bdev);
+ struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
mutex_lock(&dev_priv->binding_mutex);
/* If BO is being moved from MOB to system memory */
- if (new_mem->mem_type == TTM_PL_SYSTEM &&
+ if (old_mem &&
+ new_mem->mem_type == TTM_PL_SYSTEM &&
old_mem->mem_type == VMW_PL_MOB) {
struct vmw_fence_obj *fence;
- dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
+ dx_query_mob = to_vmw_bo(&bo->base);
if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
mutex_unlock(&dev_priv->binding_mutex);
return;
@@ -863,7 +870,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
*/
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
- return res->func->needs_backup;
+ return res->func->needs_guest_memory;
}
/**
@@ -959,21 +966,24 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
goto out_no_reserve;
if (res->pin_count == 0) {
- struct vmw_buffer_object *vbo = NULL;
+ struct vmw_bo *vbo = NULL;
- if (res->backup) {
- vbo = res->backup;
+ if (res->guest_memory_bo) {
+ vbo = res->guest_memory_bo;
- ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
+ ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
if (ret)
goto out_no_validate;
- if (!vbo->base.pin_count) {
+ if (!vbo->tbo.pin_count) {
+ vmw_bo_placement_set(vbo,
+ res->func->domain,
+ res->func->busy_domain);
ret = ttm_bo_validate
- (&vbo->base,
- res->func->backup_placement,
+ (&vbo->tbo,
+ &vbo->placement,
&ctx);
if (ret) {
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
goto out_no_validate;
}
}
@@ -983,7 +993,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
}
ret = vmw_resource_validate(res, interruptible, true);
if (vbo)
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
if (ret)
goto out_no_validate;
}
@@ -1016,12 +1026,12 @@ void vmw_resource_unpin(struct vmw_resource *res)
WARN_ON(ret);
WARN_ON(res->pin_count == 0);
- if (--res->pin_count == 0 && res->backup) {
- struct vmw_buffer_object *vbo = res->backup;
+ if (--res->pin_count == 0 && res->guest_memory_bo) {
+ struct vmw_bo *vbo = res->guest_memory_bo;
- (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+ (void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
vmw_bo_pin_reserved(vbo, false);
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
}
vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
@@ -1062,7 +1072,7 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
* @num_prefault: Returns how many pages including the first have been
* cleaned and are ok to prefault
*/
-int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
pgoff_t end, pgoff_t *num_prefault)
{
struct rb_node *cur = vbo->res_tree.rb_node;
@@ -1079,9 +1089,9 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
struct vmw_resource *cur_res =
container_of(cur, struct vmw_resource, mob_node);
- if (cur_res->backup_offset >= res_end) {
+ if (cur_res->guest_memory_offset >= res_end) {
cur = cur->rb_left;
- } else if (cur_res->backup_offset + cur_res->backup_size <=
+ } else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
res_start) {
cur = cur->rb_right;
} else {
@@ -1092,7 +1102,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
}
/*
- * In order of increasing backup_offset, clean dirty resources
+ * In order of increasing guest_memory_offset, clean dirty resources
* intersecting the range.
*/
while (found) {
@@ -1108,13 +1118,13 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
found->res_dirty = false;
}
- last_cleaned = found->backup_offset + found->backup_size;
+ last_cleaned = found->guest_memory_offset + found->guest_memory_size;
cur = rb_next(&found->mob_node);
if (!cur)
break;
found = container_of(cur, struct vmw_resource, mob_node);
- if (found->backup_offset >= res_end)
+ if (found->guest_memory_offset >= res_end)
break;
}
@@ -1123,7 +1133,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
*/
*num_prefault = 1;
if (last_cleaned > res_start) {
- struct ttm_buffer_object *bo = &vbo->base;
+ struct ttm_buffer_object *bo = &vbo->tbo;
*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
PAGE_SIZE);
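Editorial aside: vmw_resources_clean() walks the per-bo resource tree that vmw_resource_mob_attach() keeps ordered by guest_memory_offset. A compilable, simplified model of the descent that locates the lowest-offset resource intersecting a range (plain binary tree instead of the kernel rb-tree):

#include <stddef.h>

struct node {
	unsigned long offset, size;  /* resource range within the bo */
	struct node *left, *right;   /* ordered by offset, like res_tree */
};

/* Mirror of the descent in vmw_resources_clean(): ranges entirely above
 * [start, end) go left, entirely below go right; on an intersection,
 * keep descending left to find the lowest-offset match. */
static struct node *lowest_intersecting(struct node *cur,
					unsigned long start,
					unsigned long end)
{
	struct node *found = NULL;

	while (cur) {
		if (cur->offset >= end) {
			cur = cur->left;
		} else if (cur->offset + cur->size <= start) {
			cur = cur->right;
		} else {
			found = cur;
			cur = cur->left;
		}
	}
	return found;
}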
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index 3b7438b2d289..aa7cbd396bea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -58,10 +58,11 @@ struct vmw_user_resource_conv {
* struct vmw_res_func - members and functions common for a resource type
*
* @res_type: Enum that identifies the lru list to use for eviction.
- * @needs_backup: Whether the resource is guest-backed and needs
+ * @needs_guest_memory: Whether the resource is guest-backed and needs
* persistent buffer storage.
* @type_name: String that identifies the resource type.
- * @backup_placement: TTM placement for backup buffers.
+ * @domain: TTM placement for guest memory buffers.
+ * @busy_domain: TTM busy placement for guest memory buffers.
* @may_evict Whether the resource may be evicted.
* @create: Create a hardware resource.
* @destroy: Destroy a hardware resource.
@@ -81,9 +82,10 @@ struct vmw_user_resource_conv {
*/
struct vmw_res_func {
enum vmw_res_type res_type;
- bool needs_backup;
+ bool needs_guest_memory;
const char *type_name;
- struct ttm_placement *backup_placement;
+ u32 domain;
+ u32 busy_domain;
bool may_evict;
u32 prio;
u32 dirty_prio;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index e1f36a09c59c..556a403b7eb5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,13 +25,14 @@
*
**************************************************************************/
+#include "vmwgfx_bo.h"
+#include "vmwgfx_kms.h"
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include "vmwgfx_kms.h"
-
#define vmw_crtc_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
@@ -89,7 +90,7 @@ struct vmw_screen_object_unit {
struct vmw_display_unit base;
unsigned long buffer_size; /**< Size of allocated buffer */
- struct vmw_buffer_object *buffer; /**< Backing store buffer */
+ struct vmw_bo *buffer; /**< Backing store buffer */
bool defined;
};
@@ -148,7 +149,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
sou->base.set_gui_y = cmd->obj.root.y;
/* Ok to assume that buffer is pinned in vram */
- vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
+ vmw_bo_get_guest_ptr(&sou->buffer->tbo, &cmd->obj.backingStore.ptr);
cmd->obj.backingStore.pitch = mode->hdisplay * 4;
vmw_cmd_commit(dev_priv, fifo_size);
@@ -409,9 +410,13 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct vmw_private *dev_priv;
- size_t size;
int ret;
-
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_VRAM,
+ .bo_type = ttm_bo_type_device,
+ .pin = true
+ };
if (!new_fb) {
vmw_bo_unreference(&vps->bo);
@@ -420,11 +425,11 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
return 0;
}
- size = new_state->crtc_w * new_state->crtc_h * 4;
+ bo_params.size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev);
if (vps->bo) {
- if (vps->bo_size == size) {
+ if (vps->bo_size == bo_params.size) {
/*
* Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called.
@@ -443,16 +448,12 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_bo_create(dev_priv, size,
- &vmw_vram_placement,
- false, true, &vmw_bo_bo_free, &vps->bo);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo);
vmw_overlay_resume_all(dev_priv);
- if (ret) {
- vps->bo = NULL; /* vmw_bo_init frees on error */
+ if (ret)
return ret;
- }
- vps->bo_size = size;
+ vps->bo_size = bo_params.size;
/*
* TTM already thinks the buffer is pinned, but make sure the
@@ -489,7 +490,7 @@ static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update,
gmr->body.format.colorDepth = depth;
gmr->body.format.reserved = 0;
gmr->body.bytesPerLine = update->vfb->base.pitches[0];
- vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &gmr->body.ptr);
+ vmw_bo_get_guest_ptr(&vfbbo->buffer->tbo, &gmr->body.ptr);
return sizeof(*gmr);
}
@@ -546,7 +547,6 @@ static int vmw_sou_plane_update_bo(struct vmw_private *dev_priv,
bo_update.base.vfb = vfb;
bo_update.base.out_fence = out_fence;
bo_update.base.mutex = NULL;
- bo_update.base.cpu_blit = false;
bo_update.base.intr = true;
bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size;
@@ -707,7 +707,6 @@ static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv,
srf_update.base.vfb = vfb;
srf_update.base.out_fence = out_fence;
srf_update.base.mutex = &dev_priv->cmdbuf_mutex;
- srf_update.base.cpu_blit = false;
srf_update.base.intr = true;
srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size;
@@ -947,7 +946,7 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer)
{
- struct vmw_buffer_object *buf =
+ struct vmw_bo *buf =
container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
int depth = framebuffer->base.format->depth;
@@ -973,7 +972,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
cmd->body.format.reserved = 0;
cmd->body.bytesPerLine = framebuffer->base.pitches[0];
/* Buffer is reserved in vram or GMR */
- vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
+ vmw_bo_get_guest_ptr(&buf->tbo, &cmd->body.ptr);
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -1216,14 +1215,16 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc)
{
- struct vmw_buffer_object *buf =
+ struct vmw_bo *buf =
container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
struct vmw_kms_dirty dirty;
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
- ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
+ vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, buf);
if (ret)
return ret;
@@ -1323,13 +1324,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
uint32_t num_clips,
struct drm_crtc *crtc)
{
- struct vmw_buffer_object *buf =
+ struct vmw_bo *buf =
container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_kms_dirty dirty;
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
- ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
+ vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, buf);
if (ret)
return ret;
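Editorial aside: buffer creation now takes a parameter struct instead of a positional argument list, so call sites name the placement up front. A minimal sketch of the pattern, using only fields visible in this hunk:

/* Sketch of the designated-initializer creation pattern used in
 * vmw_sou_primary_plane_prepare_fb() above; error handling trimmed. */
static int create_vram_fb_bo(struct vmw_private *dev_priv, size_t size,
			     struct vmw_bo **out)
{
	struct vmw_bo_params bo_params = {
		.domain = VMW_BO_DOMAIN_VRAM,
		.busy_domain = VMW_BO_DOMAIN_VRAM,
		.bo_type = ttm_bo_type_device,
		.size = size,
		.pin = true,
	};

	return vmw_bo_create(dev_priv, &bo_params, out);
}

Note that the hunk also drops the old "vps->bo = NULL; /* vmw_bo_init frees on error */" workaround, which suggests the new create path no longer leaves a stale pointer behind on failure.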
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 51e83dfa1cac..e7226db8b242 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,9 +27,10 @@
#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
-#include "vmwgfx_binding.h"
struct vmw_shader {
struct vmw_resource res;
@@ -88,12 +89,13 @@ const struct vmw_user_resource_conv *user_shader_converter =
static const struct vmw_res_func vmw_gb_shader_func = {
.res_type = vmw_res_shader,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "guest backed shaders",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_shader_create,
.destroy = vmw_gb_shader_destroy,
.bind = vmw_gb_shader_bind,
@@ -102,12 +104,13 @@ static const struct vmw_res_func vmw_gb_shader_func = {
static const struct vmw_res_func vmw_dx_shader_func = {
.res_type = vmw_res_shader,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "dx shaders",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_shader_create,
/*
* The destroy callback is only called with a committed resource on
@@ -158,7 +161,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
SVGA3dShaderType type,
uint8_t num_input_sig,
uint8_t num_output_sig,
- struct vmw_buffer_object *byte_code,
+ struct vmw_bo *byte_code,
void (*res_free) (struct vmw_resource *res))
{
struct vmw_shader *shader = vmw_res_to_shader(res);
@@ -175,10 +178,10 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
return ret;
}
- res->backup_size = size;
+ res->guest_memory_size = size;
if (byte_code) {
- res->backup = vmw_bo_reference(byte_code);
- res->backup_offset = offset;
+ res->guest_memory_bo = vmw_bo_reference(byte_code);
+ res->guest_memory_offset = offset;
}
shader->size = size;
shader->type = type;
@@ -259,8 +262,8 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
cmd->body.mobid = bo->resource->start;
- cmd->body.offsetInBytes = res->backup_offset;
- res->backup_dirty = false;
+ cmd->body.offsetInBytes = res->guest_memory_offset;
+ res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -277,7 +280,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
} *cmd;
struct vmw_fence_obj *fence;
- BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
+ BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -397,8 +400,8 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = shader->ctx->id;
cmd->body.shid = shader->id;
- cmd->body.mobid = res->backup->base.resource->start;
- cmd->body.offsetInBytes = res->backup_offset;
+ cmd->body.mobid = res->guest_memory_bo->tbo.resource->start;
+ cmd->body.offsetInBytes = res->guest_memory_offset;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
@@ -508,7 +511,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
struct vmw_fence_obj *fence;
int ret;
- BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
+ BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_scrub(res);
@@ -680,7 +683,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
}
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buffer,
+ struct vmw_bo *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
@@ -734,7 +737,7 @@ out:
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buffer,
+ struct vmw_bo *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type)
@@ -771,7 +774,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_buffer_object *buffer = NULL;
+ struct vmw_bo *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
@@ -782,7 +785,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
return ret;
}
- if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
+ if ((u64)buffer->tbo.base.size < (u64)size + (u64)offset) {
VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
@@ -807,7 +810,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
num_output_sig, tfile, shader_handle);
out_bad_arg:
vmw_bo_unreference(&buffer);
- drm_gem_object_put(&buffer->base.base);
+ drm_gem_object_put(&buffer->tbo.base);
return ret;
}
@@ -884,28 +887,34 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
struct list_head *list)
{
struct ttm_operation_ctx ctx = { false, true };
- struct vmw_buffer_object *buf;
+ struct vmw_bo *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
int ret;
struct vmw_resource *res;
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_device,
+ .size = size,
+ .pin = true
+ };
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement,
- true, true, vmw_bo_bo_free, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (unlikely(ret != 0))
goto out;
- ret = ttm_bo_reserve(&buf->base, false, true, NULL);
+ ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
if (unlikely(ret != 0))
goto no_reserve;
/* Map and copy shader bytecode. */
- ret = ttm_bo_kmap(&buf->base, 0, PFN_UP(size), &map);
+ ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
- ttm_bo_unreserve(&buf->base);
+ ttm_bo_unreserve(&buf->tbo);
goto no_reserve;
}
@@ -913,9 +922,9 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
WARN_ON(is_iomem);
ttm_bo_kunmap(&map);
- ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
+ ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);
WARN_ON(ret != 0);
- ttm_bo_unreserve(&buf->base);
+ ttm_bo_unreserve(&buf->tbo);
res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
if (unlikely(ret != 0))
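A minimal sketch of the new allocation pattern, assuming only the vmw_bo_params fields visible in this hunk (variable names are illustrative):

	struct vmw_bo *buf;
	struct vmw_bo_params bo_params = {
		.domain      = VMW_BO_DOMAIN_SYS,	/* initial placement */
		.busy_domain = VMW_BO_DOMAIN_SYS,	/* placement under contention */
		.bo_type     = ttm_bo_type_device,
		.size        = size,
		.pin         = true
	};
	int ret = vmw_bo_create(dev_priv, &bo_params, &buf);
	if (unlikely(ret != 0))
		return ret;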
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 4ea32b01efc0..5af4db6d1f18 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -24,6 +24,7 @@
*
**************************************************************************/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -81,10 +82,11 @@ static void vmw_view_commit_notify(struct vmw_resource *res,
static const struct vmw_res_func vmw_view_func = {
.res_type = vmw_res_view,
- .needs_backup = false,
+ .needs_guest_memory = false,
.may_evict = false,
.type_name = "DX view",
- .backup_placement = NULL,
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
.create = vmw_view_create,
.commit_notify = vmw_view_commit_notify,
};
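After this series a resource type declares its backing placement with domain flags instead of a ttm_placement pointer. A hypothetical res_func, sketched from the initializers in these hunks (the name vmw_example_func is illustrative):

	static const struct vmw_res_func vmw_example_func = {
		.res_type = vmw_res_view,
		.needs_guest_memory = false,
		.may_evict = false,
		.type_name = "example resource",
		.domain = VMW_BO_DOMAIN_SYS,		/* preferred placement */
		.busy_domain = VMW_BO_DOMAIN_SYS,	/* fallback while busy */
	};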
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 0090abe89254..ba0c0e12cfe9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/******************************************************************************
*
- * COPYRIGHT (C) 2014-2022 VMware, Inc., Palo Alto, CA., USA
+ * COPYRIGHT (C) 2014-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,14 +25,15 @@
*
******************************************************************************/
+#include "vmwgfx_bo.h"
+#include "vmwgfx_kms.h"
+#include "vmw_surface_cache.h"
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include "vmwgfx_kms.h"
-#include "vmw_surface_cache.h"
-
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
#define vmw_encoder_to_stdu(x) \
@@ -65,12 +66,11 @@ enum stdu_content_type {
*/
struct vmw_stdu_dirty {
struct vmw_kms_dirty base;
- SVGA3dTransferType transfer;
s32 left, right, top, bottom;
s32 fb_left, fb_top;
u32 pitch;
union {
- struct vmw_buffer_object *buf;
+ struct vmw_bo *buf;
u32 sid;
};
};
@@ -136,12 +136,6 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
* Screen Target Display Unit CRTC Functions
*****************************************************************************/
-static bool vmw_stdu_use_cpu_blit(const struct vmw_private *vmw)
-{
- return !(vmw->capabilities & SVGA_CAP_3D) || vmw->vram_size < (32 * 1024 * 1024);
-}
-
-
/**
* vmw_stdu_crtc_destroy - cleans up the STDU
*
@@ -451,93 +445,6 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
}
/**
- * vmw_stdu_bo_clip - Callback to encode a suface DMA command cliprect
- *
- * @dirty: The closure structure.
- *
- * Encodes a surface DMA command cliprect and updates the bounding box
- * for the DMA.
- */
-static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
-{
- struct vmw_stdu_dirty *ddirty =
- container_of(dirty, struct vmw_stdu_dirty, base);
- struct vmw_stdu_dma *cmd = dirty->cmd;
- struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
-
- blit += dirty->num_hits;
- blit->srcx = dirty->fb_x;
- blit->srcy = dirty->fb_y;
- blit->x = dirty->unit_x1;
- blit->y = dirty->unit_y1;
- blit->d = 1;
- blit->w = dirty->unit_x2 - dirty->unit_x1;
- blit->h = dirty->unit_y2 - dirty->unit_y1;
- dirty->num_hits++;
-
- if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
- return;
-
- /* Destination bounding box */
- ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
- ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
- ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
- ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
-}
-
-/**
- * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
- *
- * @dirty: The closure structure.
- *
- * Fills in the missing fields in a DMA command, and optionally encodes
- * a screen target update command, depending on transfer direction.
- */
-static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
-{
- struct vmw_stdu_dirty *ddirty =
- container_of(dirty, struct vmw_stdu_dirty, base);
- struct vmw_screen_target_display_unit *stdu =
- container_of(dirty->unit, typeof(*stdu), base);
- struct vmw_stdu_dma *cmd = dirty->cmd;
- struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
- SVGA3dCmdSurfaceDMASuffix *suffix =
- (SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
- size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
-
- if (!dirty->num_hits) {
- vmw_cmd_commit(dirty->dev_priv, 0);
- return;
- }
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
- cmd->header.size = sizeof(cmd->body) + blit_size;
- vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
- cmd->body.guest.pitch = ddirty->pitch;
- cmd->body.host.sid = stdu->display_srf->res.id;
- cmd->body.host.face = 0;
- cmd->body.host.mipmap = 0;
- cmd->body.transfer = ddirty->transfer;
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = ddirty->buf->base.base.size;
-
- if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
- blit_size += sizeof(struct vmw_stdu_update);
-
- vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
- ddirty->left, ddirty->right,
- ddirty->top, ddirty->bottom);
- }
-
- vmw_cmd_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
-
- stdu->display_srf->res.res_dirty = true;
- ddirty->left = ddirty->top = S32_MAX;
- ddirty->right = ddirty->bottom = S32_MIN;
-}
-
-
-/**
* vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
*
* @dirty: The closure structure.
@@ -597,62 +504,21 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
return;
-	/* Assume we are blitting from Guest (bo) to Host (display_srf) */
+	/* This is now a readback: blit from Host (display_srf) to Guest (bo) */
- dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
- dst_bo = &stdu->display_srf->res.backup->base;
- dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
-
- src_pitch = ddirty->pitch;
- src_bo = &ddirty->buf->base;
- src_offset = ddirty->fb_top * src_pitch + ddirty->fb_left * stdu->cpp;
-
- /* Swap src and dst if the assumption was wrong. */
- if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM) {
- swap(dst_pitch, src_pitch);
- swap(dst_bo, src_bo);
- swap(src_offset, dst_offset);
- }
+ src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
+ src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
+ src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
+
+ dst_pitch = ddirty->pitch;
+ dst_bo = &ddirty->buf->tbo;
+ dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
src_bo, src_offset, src_pitch,
width * stdu->cpp, height, &diff);
-
- if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM &&
- drm_rect_visible(&diff.rect)) {
- struct vmw_private *dev_priv;
- struct vmw_stdu_update *cmd;
- struct drm_clip_rect region;
- int ret;
-
- /* We are updating the actual surface, not a proxy */
- region.x1 = diff.rect.x1;
- region.x2 = diff.rect.x2;
- region.y1 = diff.rect.y1;
- region.y2 = diff.rect.y2;
- ret = vmw_kms_update_proxy(&stdu->display_srf->res, &region,
- 1, 1);
- if (ret)
- goto out_cleanup;
-
-
- dev_priv = vmw_priv(stdu->base.crtc.dev);
- cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
- if (!cmd)
- goto out_cleanup;
-
- vmw_stdu_populate_update(cmd, stdu->base.unit,
- region.x1, region.x2,
- region.y1, region.y2);
-
- vmw_cmd_commit(dev_priv, sizeof(*cmd));
- }
-
-out_cleanup:
- ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
- ddirty->right = ddirty->bottom = S32_MIN;
}
/**
- * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
- * framebuffer and the screen target system.
+ * vmw_kms_stdu_readback - Perform a readback from the screen target
+ * system into a buffer-object backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
@@ -665,9 +531,6 @@ out_cleanup:
* be NULL.
* @num_clips: Number of clip rects in @clips or @vclips.
* @increment: Increment to use when looping over @clips or @vclips.
- * @to_surface: Whether to DMA to the screen target system as opposed to
- * from the screen target system.
- * @interruptible: Whether to perform waits interruptible if possible.
- * @crtc: If crtc is passed, perform stdu dma on that crtc only.
+ * @crtc: If crtc is passed, perform the readback on that crtc only.
 *
- * If DMA-ing till the screen target system, the function will also notify
- * the screen target system.
+ * The readback is performed as a CPU blit from the display surface's
+ * guest memory buffer into the framebuffer's buffer object.
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
-int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_clip_rect *clips,
- struct drm_vmw_rect *vclips,
- uint32_t num_clips,
- int increment,
- bool to_surface,
- bool interruptible,
- struct drm_crtc *crtc)
+int vmw_kms_stdu_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ struct drm_crtc *crtc)
{
- struct vmw_buffer_object *buf =
+ struct vmw_bo *buf =
container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_stdu_dirty ddirty;
int ret;
- bool cpu_blit = vmw_stdu_use_cpu_blit(dev_priv);
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
/*
- * VMs without 3D support don't have the surface DMA command and
- * we'll be using a CPU blit, and the framebuffer should be moved out
- * of VRAM.
+ * The GMR domain might seem confusing because it should never be needed
+ * for screen targets, but e.g. the xorg vmware driver issues
+ * CMD_SURFACE_DMA for various pixmap updates which might transition our bo
+ * to a GMR. Instead of forcing another transition we can optimize the
+ * readback by reading directly from the GMR.
*/
- ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_GMR,
+ VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_GMR);
+ ret = vmw_validation_add_bo(&val_ctx, buf);
if (ret)
return ret;
- ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+ ret = vmw_validation_prepare(&val_ctx, NULL, true);
if (ret)
goto out_unref;
- ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
- SVGA3D_READ_HOST_VRAM;
ddirty.left = ddirty.top = S32_MAX;
ddirty.right = ddirty.bottom = S32_MIN;
ddirty.fb_left = ddirty.fb_top = S32_MAX;
ddirty.pitch = vfb->base.pitches[0];
ddirty.buf = buf;
- ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
- ddirty.base.clip = vmw_stdu_bo_clip;
- ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
- num_clips * sizeof(SVGA3dCopyBox) +
- sizeof(SVGA3dCmdSurfaceDMASuffix);
- if (to_surface)
- ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
-
-
- if (cpu_blit) {
- ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
- ddirty.base.clip = vmw_stdu_bo_cpu_clip;
- ddirty.base.fifo_reserve_size = 0;
- }
+
+ ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
+ ddirty.base.clip = vmw_stdu_bo_cpu_clip;
+ ddirty.base.fifo_reserve_size = 0;
ddirty.base.crtc = crtc;
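With the SurfaceDMA encoding gone, the whole readback reduces to a CPU blit from the display surface's guest memory into the framebuffer bo. A sketch using the names from the hunks above (pitches and offsets computed as in vmw_stdu_bo_cpu_commit()):

	struct ttm_buffer_object *src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
	struct ttm_buffer_object *dst_bo = &ddirty->buf->tbo;

	/* Copy the dirty region host -> guest; no fifo command is reserved. */
	(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
			       src_bo, src_offset, src_pitch,
			       width * stdu->cpp, height, &diff);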
@@ -1160,11 +1013,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
/*
* This should only happen if the buffer object is too large to create a
* proxy surface for.
- * If we are a 2D VM with a buffer object then we have to use CPU blit
- * so cache these mappings
*/
- if (vps->content_fb_type == SEPARATE_BO &&
- vmw_stdu_use_cpu_blit(dev_priv))
+ if (vps->content_fb_type == SEPARATE_BO)
vps->cpp = new_fb->pitches[0] / new_fb->width;
return 0;
@@ -1174,14 +1024,6 @@ out_srf_unref:
return ret;
}
-static uint32_t vmw_stdu_bo_fifo_size(struct vmw_du_update_plane *update,
- uint32_t num_hits)
-{
- return sizeof(struct vmw_stdu_dma) + sizeof(SVGA3dCopyBox) * num_hits +
- sizeof(SVGA3dCmdSurfaceDMASuffix) +
- sizeof(struct vmw_stdu_update);
-}
-
static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
uint32_t num_hits)
{
@@ -1189,68 +1031,6 @@ static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
sizeof(struct vmw_stdu_update);
}
-static uint32_t vmw_stdu_bo_populate_dma(struct vmw_du_update_plane *update,
- void *cmd, uint32_t num_hits)
-{
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer_bo *vfbbo;
- struct vmw_stdu_dma *cmd_dma = cmd;
-
- stdu = container_of(update->du, typeof(*stdu), base);
- vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
-
- cmd_dma->header.id = SVGA_3D_CMD_SURFACE_DMA;
- cmd_dma->header.size = sizeof(cmd_dma->body) +
- sizeof(struct SVGA3dCopyBox) * num_hits +
- sizeof(SVGA3dCmdSurfaceDMASuffix);
- vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &cmd_dma->body.guest.ptr);
- cmd_dma->body.guest.pitch = update->vfb->base.pitches[0];
- cmd_dma->body.host.sid = stdu->display_srf->res.id;
- cmd_dma->body.host.face = 0;
- cmd_dma->body.host.mipmap = 0;
- cmd_dma->body.transfer = SVGA3D_WRITE_HOST_VRAM;
-
- return sizeof(*cmd_dma);
-}
-
-static uint32_t vmw_stdu_bo_populate_clip(struct vmw_du_update_plane *update,
- void *cmd, struct drm_rect *clip,
- uint32_t fb_x, uint32_t fb_y)
-{
- struct SVGA3dCopyBox *box = cmd;
-
- box->srcx = fb_x;
- box->srcy = fb_y;
- box->srcz = 0;
- box->x = clip->x1;
- box->y = clip->y1;
- box->z = 0;
- box->w = drm_rect_width(clip);
- box->h = drm_rect_height(clip);
- box->d = 1;
-
- return sizeof(*box);
-}
-
-static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
- void *cmd, struct drm_rect *bb)
-{
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer_bo *vfbbo;
- SVGA3dCmdSurfaceDMASuffix *suffix = cmd;
-
- stdu = container_of(update->du, typeof(*stdu), base);
- vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
-
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = vfbbo->buffer->base.base.size;
-
- vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
- bb->y1, bb->y2);
-
- return sizeof(*suffix) + sizeof(struct vmw_stdu_update);
-}
-
static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane *update,
void *cmd, uint32_t num_hits)
{
@@ -1300,11 +1080,11 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
diff.cpp = stdu->cpp;
- dst_bo = &stdu->display_srf->res.backup->base;
+ dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
- src_bo = &vfbbo->buffer->base;
+ src_bo = &vfbbo->buffer->tbo;
src_pitch = update->vfb->base.pitches[0];
src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
stdu->cpp;
@@ -1368,24 +1148,12 @@ static int vmw_stdu_plane_update_bo(struct vmw_private *dev_priv,
bo_update.base.vfb = vfb;
bo_update.base.out_fence = out_fence;
bo_update.base.mutex = NULL;
- bo_update.base.cpu_blit = vmw_stdu_use_cpu_blit(dev_priv);
bo_update.base.intr = false;
- /*
- * VM without 3D support don't have surface DMA command and framebuffer
- * should be moved out of VRAM.
- */
- if (bo_update.base.cpu_blit) {
- bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
- bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
- bo_update.base.clip = vmw_stdu_bo_clip_cpu;
- bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
- } else {
- bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size;
- bo_update.base.pre_clip = vmw_stdu_bo_populate_dma;
- bo_update.base.clip = vmw_stdu_bo_populate_clip;
- bo_update.base.post_clip = vmw_stdu_bo_populate_update;
- }
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
+ bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
+ bo_update.base.clip = vmw_stdu_bo_clip_cpu;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
return vmw_du_helper_plane_update(&bo_update.base);
}
@@ -1548,7 +1316,6 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
srf_update.vfb = vfb;
srf_update.out_fence = out_fence;
srf_update.mutex = &dev_priv->cmdbuf_mutex;
- srf_update.cpu_blit = false;
srf_update.intr = true;
if (vfbs->is_bo_proxy)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index 2de97419d5c9..edcc40659038 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2018-2019 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2018-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,11 +26,12 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
-#include "vmwgfx_binding.h"
+
+#include <drm/ttm/ttm_placement.h>
/**
* struct vmw_dx_streamoutput - Streamoutput resource metadata.
@@ -62,10 +63,11 @@ static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
static const struct vmw_res_func vmw_dx_streamoutput_func = {
.res_type = vmw_res_streamoutput,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = false,
.type_name = "DX streamoutput",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_streamoutput_create,
.destroy = NULL, /* Command buffer managed resource. */
.bind = vmw_dx_streamoutput_bind,
@@ -104,8 +106,8 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
cmd->header.size = sizeof(cmd->body);
cmd->body.soid = so->id;
- cmd->body.mobid = res->backup->base.resource->start;
- cmd->body.offsetInBytes = res->backup_offset;
+ cmd->body.mobid = res->guest_memory_bo->tbo.resource->start;
+ cmd->body.offsetInBytes = res->guest_memory_offset;
cmd->body.sizeInBytes = so->size;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -195,7 +197,7 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
struct vmw_fence_obj *fence;
int ret;
- if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB))
+ if (WARN_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB))
return -EINVAL;
mutex_lock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index dcfb003841b3..5db403ee8261 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,8 +25,7 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -34,6 +33,8 @@
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"
+#include <drm/ttm/ttm_placement.h>
+
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
@@ -125,12 +126,13 @@ const struct vmw_user_resource_conv *user_surface_converter =
static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
- .needs_backup = false,
+ .needs_guest_memory = false,
.may_evict = true,
.prio = 1,
.dirty_prio = 1,
.type_name = "legacy surfaces",
- .backup_placement = &vmw_srf_placement,
+ .domain = VMW_BO_DOMAIN_GMR,
+ .busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
.create = &vmw_legacy_srf_create,
.destroy = &vmw_legacy_srf_destroy,
.bind = &vmw_legacy_srf_bind,
@@ -139,12 +141,13 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
static const struct vmw_res_func vmw_gb_surface_func = {
.res_type = vmw_res_surface,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 1,
.dirty_prio = 2,
.type_name = "guest backed surfaces",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_surface_create,
.destroy = vmw_gb_surface_destroy,
.bind = vmw_gb_surface_bind,
@@ -379,7 +382,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
*/
mutex_lock(&dev_priv->cmdbuf_mutex);
- dev_priv->used_memory_size -= res->backup_size;
+ dev_priv->used_memory_size -= res->guest_memory_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
}
@@ -409,7 +412,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
return 0;
srf = vmw_res_to_srf(res);
- if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+ if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >=
dev_priv->memory_size))
return -EBUSY;
@@ -447,7 +450,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
* Surface memory usage accounting.
*/
- dev_priv->used_memory_size += res->backup_size;
+ dev_priv->used_memory_size += res->guest_memory_size;
return 0;
out_no_fifo:
@@ -524,7 +527,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
static int vmw_legacy_srf_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
- if (!res->backup_dirty)
+ if (!res->guest_memory_dirty)
return 0;
return vmw_legacy_srf_dma(res, val_buf, true);
@@ -583,7 +586,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
* Surface memory usage accounting.
*/
- dev_priv->used_memory_size -= res->backup_size;
+ dev_priv->used_memory_size -= res->guest_memory_size;
/*
* Release the surface ID.
@@ -683,8 +686,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
- if (res && res->backup)
- drm_gem_object_put(&res->backup->base.base);
+ if (res->guest_memory_bo)
+ drm_gem_object_put(&res->guest_memory_bo->tbo.base);
*p_base = NULL;
vmw_resource_unreference(&res);
@@ -812,7 +815,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
++cur_size;
}
}
- res->backup_size = cur_bo_offset;
+ res->guest_memory_size = cur_bo_offset;
if (metadata->scanout &&
metadata->num_sizes == 1 &&
metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
@@ -856,14 +859,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ret = vmw_gem_object_create_with_handle(dev_priv,
file_priv,
- res->backup_size,
+ res->guest_memory_size,
&backup_handle,
- &res->backup);
+ &res->guest_memory_bo);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
- vmw_bo_reference(res->backup);
+ vmw_bo_reference(res->guest_memory_bo);
/*
* We don't expose the handle to userspace and the surface
* already holds a gem reference
@@ -872,7 +875,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
tmp = vmw_resource_reference(&srf->res);
- ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
&vmw_user_surface_base_release);
@@ -1186,7 +1189,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
- submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+ submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0);
cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd1))
@@ -1196,7 +1199,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
cmd1->body.mobid = bo->resource->start;
- if (res->backup_dirty) {
+ if (res->guest_memory_dirty) {
cmd2 = (void *) &cmd1[1];
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
@@ -1204,12 +1207,12 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
}
vmw_cmd_commit(dev_priv, submit_size);
- if (res->backup->dirty && res->backup_dirty) {
+ if (res->guest_memory_bo->dirty && res->guest_memory_dirty) {
/* We've just made a full upload. Clear dirty regions. */
vmw_bo_dirty_clear_res(res);
}
- res->backup_dirty = false;
+ res->guest_memory_dirty = false;
return 0;
}
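A condensed sketch of the renamed dirty bookkeeping in vmw_gb_surface_bind(), using only fields from the hunk above: a full upload clears the fine-grained bo dirty map before the resource-level flag is dropped.

	if (res->guest_memory_bo->dirty && res->guest_memory_dirty)
		vmw_bo_dirty_clear_res(res);	/* full upload, drop per-page dirty */
	res->guest_memory_dirty = false;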
@@ -1505,11 +1508,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
- &res->backup);
+ &res->guest_memory_bo);
if (ret == 0) {
- if (res->backup->base.base.size < res->backup_size) {
+ if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
- vmw_bo_unreference(&res->backup);
+ vmw_bo_unreference(&res->guest_memory_bo);
ret = -EINVAL;
goto out_unlock;
} else {
@@ -1520,11 +1523,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
(drm_vmw_surface_flag_create_buffer |
drm_vmw_surface_flag_coherent)) {
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
- res->backup_size,
+ res->guest_memory_size,
&backup_handle,
- &res->backup);
+ &res->guest_memory_bo);
if (ret == 0)
- vmw_bo_reference(res->backup);
+ vmw_bo_reference(res->guest_memory_bo);
}
if (unlikely(ret != 0)) {
@@ -1533,9 +1536,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
- struct vmw_buffer_object *backup = res->backup;
+ struct vmw_bo *backup = res->guest_memory_bo;
- ttm_bo_reserve(&backup->base, false, false, NULL);
+ ttm_bo_reserve(&backup->tbo, false, false, NULL);
if (!res->func->dirty_alloc)
ret = -EINVAL;
if (!ret)
@@ -1544,7 +1547,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
res->coherent = true;
ret = res->func->dirty_alloc(res);
}
- ttm_bo_unreserve(&backup->base);
+ ttm_bo_unreserve(&backup->tbo);
if (ret) {
vmw_resource_unreference(&res);
goto out_unlock;
@@ -1553,7 +1556,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
tmp = vmw_resource_reference(res);
- ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
req->base.drm_surface_flags &
drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
@@ -1566,11 +1569,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
rep->handle = user_srf->prime.base.handle;
- rep->backup_size = res->backup_size;
- if (res->backup) {
+ rep->backup_size = res->guest_memory_size;
+ if (res->guest_memory_bo) {
rep->buffer_map_handle =
- drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
- rep->buffer_size = res->backup->base.base.size;
+ drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
+ rep->buffer_size = res->guest_memory_bo->tbo.base.size;
rep->buffer_handle = backup_handle;
} else {
rep->buffer_map_handle = 0;
@@ -1613,14 +1616,14 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- if (!srf->res.backup) {
+ if (!srf->res.guest_memory_bo) {
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
metadata = &srf->metadata;
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->guest_memory_bo */
- ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
+ ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base,
&backup_handle);
mutex_unlock(&dev_priv->cmdbuf_mutex);
if (ret != 0) {
@@ -1639,11 +1642,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->creq.base.buffer_handle = backup_handle;
rep->creq.base.base_size = metadata->base_size;
rep->crep.handle = user_srf->prime.base.handle;
- rep->crep.backup_size = srf->res.backup_size;
+ rep->crep.backup_size = srf->res.guest_memory_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
- drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
- rep->crep.buffer_size = srf->res.backup->base.base.size;
+ drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node);
+ rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size;
rep->creq.version = drm_vmw_gb_surface_v1;
rep->creq.svga3d_flags_upper_32_bits =
@@ -1742,12 +1745,12 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
{
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
- size_t backup_end = res->backup_offset + res->backup_size;
+ size_t backup_end = res->guest_memory_offset + res->guest_memory_size;
struct vmw_surface_loc loc1, loc2;
const struct vmw_surface_cache *cache;
- start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
- end = min(end, backup_end) - res->backup_offset;
+ start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
+ end = min(end, backup_end) - res->guest_memory_offset;
cache = &dirty->cache;
vmw_surface_get_loc(cache, &loc1, start);
vmw_surface_get_loc(cache, &loc2, end - 1);
@@ -1794,13 +1797,13 @@ static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
const struct vmw_surface_cache *cache = &dirty->cache;
- size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
+ size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes;
SVGA3dBox *box = &dirty->boxes[0];
u32 box_c2;
box->h = box->d = 1;
- start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
- end = min(end, backup_end) - res->backup_offset;
+ start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
+ end = min(end, backup_end) - res->guest_memory_offset;
box_c2 = box->x + box->w;
if (box->w == 0 || box->x > start)
box->x = start;
@@ -1816,8 +1819,8 @@ static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
{
struct vmw_surface *srf = vmw_res_to_srf(res);
- if (WARN_ON(end <= res->backup_offset ||
- start >= res->backup_offset + res->backup_size))
+ if (WARN_ON(end <= res->guest_memory_offset ||
+ start >= res->guest_memory_offset + res->guest_memory_size))
return;
if (srf->metadata.format == SVGA3D_BUFFER)
@@ -2074,7 +2077,7 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
sample_count = metadata->multisample_count;
- srf->res.backup_size =
+ srf->res.guest_memory_size =
vmw_surface_get_serialized_size_extended(
metadata->format,
metadata->base_size,
@@ -2083,7 +2086,7 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
sample_count);
if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
- srf->res.backup_size += sizeof(SVGA3dDXSOState);
+ srf->res.guest_memory_size += sizeof(SVGA3dDXSOState);
/*
* Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 856a352a72a6..af8562c95cc3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,6 +25,7 @@
*
**************************************************************************/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
@@ -49,13 +50,6 @@ static const struct ttm_place gmr_placement_flags = {
.flags = 0
};
-static const struct ttm_place mob_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_MOB,
- .flags = 0
-};
-
struct ttm_placement vmw_vram_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
@@ -77,27 +71,6 @@ static const struct ttm_place vram_gmr_placement_flags[] = {
}
};
-static const struct ttm_place gmr_vram_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = 0
- }
-};
-
-static const struct ttm_place vmw_sys_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_SYSTEM,
- .flags = 0
-};
-
struct ttm_placement vmw_vram_gmr_placement = {
.num_placement = 2,
.placement = vram_gmr_placement_flags,
@@ -105,13 +78,6 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags
};
-struct ttm_placement vmw_vram_sys_placement = {
- .num_placement = 1,
- .placement = &vram_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
-};
-
struct ttm_placement vmw_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
@@ -119,53 +85,6 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
-struct ttm_placement vmw_pt_sys_placement = {
- .num_placement = 1,
- .placement = &vmw_sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &vmw_sys_placement_flags
-};
-
-static const struct ttm_place nonfixed_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_SYSTEM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_MOB,
- .flags = 0
- }
-};
-
-struct ttm_placement vmw_srf_placement = {
- .num_placement = 1,
- .num_busy_placement = 2,
- .placement = &gmr_placement_flags,
- .busy_placement = gmr_vram_placement_flags
-};
-
-struct ttm_placement vmw_mob_placement = {
- .num_placement = 1,
- .num_busy_placement = 1,
- .placement = &mob_placement_flags,
- .busy_placement = &mob_placement_flags
-};
-
-struct ttm_placement vmw_nonfixed_placement = {
- .num_placement = 3,
- .placement = nonfixed_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
-};
-
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
@@ -508,7 +427,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
if (!vmw_be)
return NULL;
- vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
+ vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -534,7 +453,7 @@ static void vmw_evict_flags(struct ttm_buffer_object *bo,
static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
- struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+ struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@@ -596,9 +515,13 @@ static int vmw_move(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
- struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
- struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
- int ret;
+ struct ttm_resource_manager *new_man;
+ struct ttm_resource_manager *old_man = NULL;
+ int ret = 0;
+
+ new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
+ if (bo->resource)
+ old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
@@ -606,9 +529,15 @@ static int vmw_move(struct ttm_buffer_object *bo,
return ret;
}
+ if (!bo->resource || (bo->resource->mem_type == TTM_PL_SYSTEM &&
+ bo->ttm == NULL)) {
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
vmw_move_notify(bo, bo->resource, new_mem);
- if (old_man->use_tt && new_man->use_tt) {
+ if (old_man && old_man->use_tt && new_man->use_tt) {
if (vmw_memtype_is_system(bo->resource->mem_type)) {
ttm_bo_move_null(bo, new_mem);
return 0;
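The intent of the new guard, restated as a commented sketch (semantics assumed from the hunk above): a bo with no resource yet, or one idling in TTM_PL_SYSTEM without a populated ttm_tt, has no data to copy, so the move completes as a null move.

	if (!bo->resource || (bo->resource->mem_type == TTM_PL_SYSTEM &&
			      bo->ttm == NULL)) {
		ttm_bo_move_null(bo, new_mem);	/* adopt new_mem, nothing to copy */
		return 0;
	}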
@@ -645,34 +574,39 @@ struct ttm_device_funcs vmw_bo_driver = {
};
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
- unsigned long bo_size,
- struct ttm_buffer_object **bo_p)
+ size_t bo_size, u32 domain,
+ struct vmw_bo **bo_p)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
- struct ttm_buffer_object *bo;
+ struct vmw_bo *vbo;
int ret;
+ struct vmw_bo_params bo_params = {
+ .domain = domain,
+ .busy_domain = domain,
+ .bo_type = ttm_bo_type_kernel,
+ .size = bo_size,
+ .pin = true
+ };
- ret = vmw_bo_create_kernel(dev_priv, bo_size,
- &vmw_pt_sys_placement,
- &bo);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(bo, false, true, NULL);
+ ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0);
- ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
+ ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
- container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
+ container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);
ret = vmw_ttm_map_dma(vmw_tt);
}
- ttm_bo_unreserve(bo);
+ ttm_bo_unreserve(&vbo->tbo);
if (likely(ret == 0))
- *bo_p = bo;
+ *bo_p = vbo;
return ret;
}
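Condensed, the bring-up sequence vmw_bo_create_and_populate() now performs for an arbitrary domain (calls as in the hunk above; error handling elided for brevity):

	struct vmw_bo_params bo_params = {
		.domain = domain, .busy_domain = domain,
		.bo_type = ttm_bo_type_kernel, .size = bo_size, .pin = true
	};

	ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
	ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
	ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
	ret = vmw_ttm_map_dma(vmw_tt);	/* vmw_tt from vbo->tbo.ttm, as above */
	ttm_bo_unreserve(&vbo->tbo);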
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
index 6ad744ae07f5..d140089e53d4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
@@ -25,6 +25,7 @@
*
**************************************************************************/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
@@ -80,10 +81,11 @@ static void vmw_stream_set_arg_handle(void *data, u32 handle)
static const struct vmw_simple_resource_func va_stream_func = {
.res_func = {
.res_type = vmw_res_stream,
- .needs_backup = false,
+ .needs_guest_memory = false,
.may_evict = false,
.type_name = "overlay stream",
- .backup_placement = NULL,
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
.create = NULL,
.destroy = NULL,
.bind = NULL,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f5c4a40fb16d..aaacbdcbd742 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2018 - 2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,9 +25,12 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-#include <linux/slab.h>
-#include "vmwgfx_validation.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_validation.h"
+
+#include <linux/slab.h>
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
@@ -38,8 +41,6 @@
* @hash: A hash entry used for the duplicate detection hash table.
* @coherent_count: If switching backup buffers, number of new coherent
* resources that will have this buffer as a backup buffer.
- * @as_mob: Validate as mob.
- * @cpu_blit: Validate for cpu blit access.
*
* Bit fields are used since these structures are allocated and freed in
* large numbers and space conservation is desired.
@@ -48,21 +49,19 @@ struct vmw_validation_bo_node {
struct ttm_validate_buffer base;
struct vmwgfx_hash_item hash;
unsigned int coherent_count;
- u32 as_mob : 1;
- u32 cpu_blit : 1;
};
/**
* struct vmw_validation_res_node - Resource validation metadata.
* @head: List head for the resource validation list.
* @hash: A hash entry used for the duplicate detection hash table.
* @res: Reference counted resource pointer.
- * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
- * to a resource.
- * @new_backup_offset: Offset into the new backup mob for resources that can
- * share MOBs.
+ * @new_guest_memory_bo: Non ref-counted pointer to new guest memory buffer
+ * to be assigned to a resource.
+ * @new_guest_memory_offset: Offset into the new guest memory MOB for
+ * resources that can share MOBs.
* @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
* the command stream provides a mob bind operation.
- * @switching_backup: The validation process is switching backup MOB.
+ * @switching_guest_memory_bo: The validation process is switching the
+ * guest memory MOB.
* @first_usage: True iff the resource has been seen only once in the current
* validation batch.
* @reserved: Whether the resource is currently reserved by this process.
@@ -77,10 +76,10 @@ struct vmw_validation_res_node {
struct list_head head;
struct vmwgfx_hash_item hash;
struct vmw_resource *res;
- struct vmw_buffer_object *new_backup;
- unsigned long new_backup_offset;
+ struct vmw_bo *new_guest_memory_bo;
+ unsigned long new_guest_memory_offset;
u32 no_buffer_needed : 1;
- u32 switching_backup : 1;
+ u32 switching_guest_memory_bo : 1;
u32 first_usage : 1;
u32 reserved : 1;
u32 dirty : 1;
@@ -173,7 +172,7 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
*/
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
- struct vmw_buffer_object *vbo)
+ struct vmw_bo *vbo)
{
struct vmw_validation_bo_node *bo_node = NULL;
@@ -194,7 +193,7 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
struct vmw_validation_bo_node *entry;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- if (entry->base.bo == &vbo->base) {
+ if (entry->base.bo == &vbo->tbo) {
bo_node = entry;
break;
}
@@ -258,26 +257,16 @@ out:
* vmw_validation_add_bo - Add a buffer object to the validation context.
* @ctx: The validation context.
* @vbo: The buffer object.
- * @as_mob: Validate as mob, otherwise suitable for GMR operations.
- * @cpu_blit: Validate in a page-mappable location.
*
* Return: Zero on success, negative error code otherwise.
*/
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
- struct vmw_buffer_object *vbo,
- bool as_mob,
- bool cpu_blit)
+ struct vmw_bo *vbo)
{
struct vmw_validation_bo_node *bo_node;
bo_node = vmw_validation_find_bo_dup(ctx, vbo);
- if (bo_node) {
- if (bo_node->as_mob != as_mob ||
- bo_node->cpu_blit != cpu_blit) {
- DRM_ERROR("Inconsistent buffer usage.\n");
- return -EINVAL;
- }
- } else {
+ if (!bo_node) {
struct ttm_validate_buffer *val_buf;
bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
@@ -290,13 +279,11 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
bo_node->hash.key);
}
val_buf = &bo_node->base;
- val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
+ val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
if (!val_buf->bo)
return -ESRCH;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
- bo_node->as_mob = as_mob;
- bo_node->cpu_blit = cpu_blit;
}
return 0;
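Usage pattern implied by the simplified interface: placement is stamped on the bo before it enters the validation list, so vmw_validation_add_bo() no longer needs per-call flags. A sketch taken from the call sites in this patch:

	vmw_bo_placement_set(vbo, res->func->domain, res->func->busy_domain);
	ret = vmw_validation_add_bo(ctx, vbo);
	if (ret)
		goto out_unreserve;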
@@ -406,23 +393,23 @@ void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
* the resource.
* @vbo: The new backup buffer object MOB. This buffer object needs to have
* already been registered with the validation context.
- * @backup_offset: Offset into the new backup MOB.
+ * @guest_memory_offset: Offset into the new guest memory MOB.
*/
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
void *val_private,
- struct vmw_buffer_object *vbo,
- unsigned long backup_offset)
+ struct vmw_bo *vbo,
+ unsigned long guest_memory_offset)
{
struct vmw_validation_res_node *val;
val = container_of(val_private, typeof(*val), private);
- val->switching_backup = 1;
+ val->switching_guest_memory_bo = 1;
if (val->first_usage)
val->no_buffer_needed = 1;
- val->new_backup = vbo;
- val->new_backup_offset = backup_offset;
+ val->new_guest_memory_bo = vbo;
+ val->new_guest_memory_offset = guest_memory_offset;
}
/**
@@ -450,21 +437,22 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
goto out_unreserve;
val->reserved = 1;
- if (res->backup) {
- struct vmw_buffer_object *vbo = res->backup;
+ if (res->guest_memory_bo) {
+ struct vmw_bo *vbo = res->guest_memory_bo;
- ret = vmw_validation_add_bo
- (ctx, vbo, vmw_resource_needs_backup(res),
- false);
+ vmw_bo_placement_set(vbo,
+ res->func->domain,
+ res->func->busy_domain);
+ ret = vmw_validation_add_bo(ctx, vbo);
if (ret)
goto out_unreserve;
}
- if (val->switching_backup && val->new_backup &&
+ if (val->switching_guest_memory_bo && val->new_guest_memory_bo &&
res->coherent) {
struct vmw_validation_bo_node *bo_node =
vmw_validation_find_bo_dup(ctx,
- val->new_backup);
+ val->new_guest_memory_bo);
if (WARN_ON(!bo_node)) {
ret = -EINVAL;
@@ -507,9 +495,9 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
vmw_resource_unreserve(val->res,
val->dirty_set,
val->dirty,
- val->switching_backup,
- val->new_backup,
- val->new_backup_offset);
+ val->switching_guest_memory_bo,
+ val->new_guest_memory_bo,
+ val->new_guest_memory_offset);
}
}
@@ -517,17 +505,14 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
* vmw_validation_bo_validate_single - Validate a single buffer object.
* @bo: The TTM buffer object base.
* @interruptible: Whether to perform waits interruptible if possible.
- * @validate_as_mob: Whether to validate in MOB memory.
*
* Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
* code on failure.
*/
-int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob)
+static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+ bool interruptible)
{
- struct vmw_buffer_object *vbo =
- container_of(bo, struct vmw_buffer_object, base);
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
struct ttm_operation_ctx ctx = {
.interruptible = interruptible,
.no_wait_gpu = false
@@ -537,30 +522,20 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
if (atomic_read(&vbo->cpu_writers))
return -EBUSY;
- if (vbo->base.pin_count > 0)
+ if (vbo->tbo.pin_count > 0)
return 0;
- if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
-
- /**
- * Put BO in VRAM if there is space, otherwise as a GMR.
- * If there is no space in VRAM and GMR ids are all used up,
- * start evicting GMRs to make room. If the DMA buffer can't be
- * used as a GMR, this will return -ENOMEM.
- */
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ ret = ttm_bo_validate(bo, &vbo->placement, &ctx);
if (ret == 0 || ret == -ERESTARTSYS)
return ret;
- /**
- * If that failed, try VRAM again, this time evicting
+ /*
+ * If that failed, try again, this time evicting
* previous contents.
*/
+ ctx.allow_res_evict = true;
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
- return ret;
+ return ttm_bo_validate(bo, &vbo->placement, &ctx);
}
/**
@@ -578,21 +553,10 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
int ret;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- struct vmw_buffer_object *vbo =
- container_of(entry->base.bo, typeof(*vbo), base);
-
- if (entry->cpu_blit) {
- struct ttm_operation_ctx ttm_ctx = {
- .interruptible = intr,
- .no_wait_gpu = false
- };
-
- ret = ttm_bo_validate(entry->base.bo,
- &vmw_nonfixed_placement, &ttm_ctx);
- } else {
- ret = vmw_validation_bo_validate_single
- (entry->base.bo, intr, entry->as_mob);
- }
+ struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
+
+ ret = vmw_validation_bo_validate_single(entry->base.bo, intr);
+
if (ret)
return ret;
@@ -639,7 +603,7 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
list_for_each_entry(val, &ctx->resource_list, head) {
struct vmw_resource *res = val->res;
- struct vmw_buffer_object *backup = res->backup;
+ struct vmw_bo *backup = res->guest_memory_bo;
ret = vmw_resource_validate(res, intr, val->dirty_set &&
val->dirty);
@@ -650,12 +614,12 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
}
/* Check if the resource switched backup buffer */
- if (backup && res->backup && (backup != res->backup)) {
- struct vmw_buffer_object *vbo = res->backup;
+ if (backup && res->guest_memory_bo && backup != res->guest_memory_bo) {
+ struct vmw_bo *vbo = res->guest_memory_bo;
- ret = vmw_validation_add_bo
- (ctx, vbo, vmw_resource_needs_backup(res),
- false);
+ vmw_bo_placement_set(vbo, res->func->domain,
+ res->func->busy_domain);
+ ret = vmw_validation_add_bo(ctx, vbo);
if (ret)
return ret;
}
@@ -889,9 +853,7 @@ void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
list_for_each_entry(entry, &ctx->bo_list, base.head) {
if (entry->coherent_count) {
unsigned int coherent_count = entry->coherent_count;
- struct vmw_buffer_object *vbo =
- container_of(entry->base.bo, typeof(*vbo),
- base);
+ struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
while (coherent_count--)
vmw_bo_dirty_release(vbo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index ab9ec226f433..240ee0c4ebfd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -73,7 +73,7 @@ struct vmw_validation_context {
size_t total_mem;
};
-struct vmw_buffer_object;
+struct vmw_bo;
struct vmw_resource;
struct vmw_fence_obj;
@@ -159,11 +159,7 @@ static inline unsigned int vmw_validation_align(unsigned int val)
}
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
- struct vmw_buffer_object *vbo,
- bool as_mob, bool cpu_blit);
-int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob);
+ struct vmw_bo *vbo);
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr);
void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
@@ -179,7 +175,7 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
bool backoff);
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
void *val_private,
- struct vmw_buffer_object *vbo,
+ struct vmw_bo *vbo,
unsigned long backup_offset);
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index 776ef5480206..a7f8611be6f4 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -19,7 +19,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
@@ -515,7 +515,7 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
goto err_poll_fini;
/* Initialize fbdev generic emulation. */
- drm_fbdev_generic_setup(drm, 24);
+ drm_fbdev_dma_setup(drm, 24);
return 0;
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index 061fb990c120..7dece2a53c5c 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
- depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM || COMPILE_TEST
+ depends on SOC_IMX5 || SOC_IMX6Q || COMPILE_TEST
depends on DRM || !DRM # if DRM=m, this can't be 'y'
select BITREVERSE
select GENERIC_ALLOCATOR if DRM